##// END OF EJS Templates
Commit r6120:f89878df (default branch) by Alexis S. L. Carvalho:
"move the reading of branch.cache from _branchtags to branchtags"
parent child Browse files
Show More
@@ -1,2350 +1,2347 b''
1 # queue.py - patch queues for mercurial
1 # queue.py - patch queues for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 '''patch management and development
8 '''patch management and development
9
9
10 This extension lets you work with a stack of patches in a Mercurial
10 This extension lets you work with a stack of patches in a Mercurial
11 repository. It manages two stacks of patches - all known patches, and
11 repository. It manages two stacks of patches - all known patches, and
12 applied patches (subset of known patches).
12 applied patches (subset of known patches).
13
13
14 Known patches are represented as patch files in the .hg/patches
14 Known patches are represented as patch files in the .hg/patches
15 directory. Applied patches are both patch files and changesets.
15 directory. Applied patches are both patch files and changesets.
16
16
17 Common tasks (use "hg help command" for more details):
17 Common tasks (use "hg help command" for more details):
18
18
19 prepare repository to work with patches qinit
19 prepare repository to work with patches qinit
20 create new patch qnew
20 create new patch qnew
21 import existing patch qimport
21 import existing patch qimport
22
22
23 print patch series qseries
23 print patch series qseries
24 print applied patches qapplied
24 print applied patches qapplied
25 print name of top applied patch qtop
25 print name of top applied patch qtop
26
26
27 add known patch to applied stack qpush
27 add known patch to applied stack qpush
28 remove patch from applied stack qpop
28 remove patch from applied stack qpop
29 refresh contents of top applied patch qrefresh
29 refresh contents of top applied patch qrefresh
30 '''
30 '''
31
31
32 from mercurial.i18n import _
32 from mercurial.i18n import _
33 from mercurial import commands, cmdutil, hg, patch, revlog, util
33 from mercurial import commands, cmdutil, hg, patch, revlog, util
34 from mercurial import repair
34 from mercurial import repair
35 import os, sys, re, errno
35 import os, sys, re, errno
36
36
# qclone can be run outside any repository, so add it to the list of
# commands that do not require a local repo.
commands.norepo += " qclone"

# Patch names looks like unix-file names.
# They must be joinable with queue directory and result in the patch path.
normname = util.normpath
42
42
class statusentry:
    """One line of the mq status file: an applied patch as 'rev:name'."""

    def __init__(self, rev, name=None):
        # Accept either the two parts separately or a pre-joined
        # "rev:name" string (as read back from the status file).
        if name:
            self.rev, self.name = rev, name
        else:
            parsed = rev.split(':', 1)
            if len(parsed) == 2:
                self.rev, self.name = parsed
            else:
                # malformed status line: leave both fields unset
                self.rev = self.name = None

    def __str__(self):
        return self.rev + ':' + self.name
57 class queue:
57 class queue:
    def __init__(self, ui, path, patchdir=None):
        """Load queue state (series and status files) from .hg/patches,
        or from *patchdir* when given."""
        self.basepath = path
        self.path = patchdir or os.path.join(path, "patches")
        self.opener = util.opener(self.path)
        self.ui = ui
        self.applied = []          # statusentry list of applied patches
        self.full_series = []      # raw series file lines (incl. guards)
        self.applied_dirty = 0     # status file needs to be rewritten
        self.series_dirty = 0      # series file needs to be rewritten
        self.series_path = "series"
        self.status_path = "status"
        self.guards_path = "guards"
        self.active_guards = None  # lazily loaded by active()
        self.guards_dirty = False
        self._diffopts = None      # lazily built by diffopts()

        if os.path.exists(self.join(self.series_path)):
            self.full_series = self.opener(self.series_path).read().splitlines()
            self.parse_series()

        if os.path.exists(self.join(self.status_path)):
            lines = self.opener(self.status_path).read().splitlines()
            self.applied = [statusentry(l) for l in lines]
81
81
82 def diffopts(self):
82 def diffopts(self):
83 if self._diffopts is None:
83 if self._diffopts is None:
84 self._diffopts = patch.diffopts(self.ui)
84 self._diffopts = patch.diffopts(self.ui)
85 return self._diffopts
85 return self._diffopts
86
86
87 def join(self, *p):
87 def join(self, *p):
88 return os.path.join(self.path, *p)
88 return os.path.join(self.path, *p)
89
89
90 def find_series(self, patch):
90 def find_series(self, patch):
91 pre = re.compile("(\s*)([^#]+)")
91 pre = re.compile("(\s*)([^#]+)")
92 index = 0
92 index = 0
93 for l in self.full_series:
93 for l in self.full_series:
94 m = pre.match(l)
94 m = pre.match(l)
95 if m:
95 if m:
96 s = m.group(2)
96 s = m.group(2)
97 s = s.rstrip()
97 s = s.rstrip()
98 if s == patch:
98 if s == patch:
99 return index
99 return index
100 index += 1
100 index += 1
101 return None
101 return None
102
102
    # Matches one guard annotation in a series-file comment: optional
    # single leading space, '#', then '+name' or '-name' where the first
    # name character may not be '-', '+', '#' or whitespace.
    guard_re = re.compile(r'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')
104
104
105 def parse_series(self):
105 def parse_series(self):
106 self.series = []
106 self.series = []
107 self.series_guards = []
107 self.series_guards = []
108 for l in self.full_series:
108 for l in self.full_series:
109 h = l.find('#')
109 h = l.find('#')
110 if h == -1:
110 if h == -1:
111 patch = l
111 patch = l
112 comment = ''
112 comment = ''
113 elif h == 0:
113 elif h == 0:
114 continue
114 continue
115 else:
115 else:
116 patch = l[:h]
116 patch = l[:h]
117 comment = l[h:]
117 comment = l[h:]
118 patch = patch.strip()
118 patch = patch.strip()
119 if patch:
119 if patch:
120 if patch in self.series:
120 if patch in self.series:
121 raise util.Abort(_('%s appears more than once in %s') %
121 raise util.Abort(_('%s appears more than once in %s') %
122 (patch, self.join(self.series_path)))
122 (patch, self.join(self.series_path)))
123 self.series.append(patch)
123 self.series.append(patch)
124 self.series_guards.append(self.guard_re.findall(comment))
124 self.series_guards.append(self.guard_re.findall(comment))
125
125
126 def check_guard(self, guard):
126 def check_guard(self, guard):
127 bad_chars = '# \t\r\n\f'
127 bad_chars = '# \t\r\n\f'
128 first = guard[0]
128 first = guard[0]
129 for c in '-+':
129 for c in '-+':
130 if first == c:
130 if first == c:
131 return (_('guard %r starts with invalid character: %r') %
131 return (_('guard %r starts with invalid character: %r') %
132 (guard, c))
132 (guard, c))
133 for c in bad_chars:
133 for c in bad_chars:
134 if c in guard:
134 if c in guard:
135 return _('invalid character in guard %r: %r') % (guard, c)
135 return _('invalid character in guard %r: %r') % (guard, c)
136
136
    def set_active(self, guards):
        """Validate *guards* and make them the active guard set.

        Aborts on the first invalid guard name.  Duplicates are dropped
        and the result stored sorted; the guards file is marked dirty so
        save_dirty() will write it out.
        """
        for guard in guards:
            bad = self.check_guard(guard)
            if bad:
                raise util.Abort(bad)
        # dict.fromkeys() removes duplicates (Python 2: keys() is a list,
        # so it can be sorted in place below)
        guards = dict.fromkeys(guards).keys()
        guards.sort()
        self.ui.debug('active guards: %s\n' % ' '.join(guards))
        self.active_guards = guards
        self.guards_dirty = True
147
147
    def active(self):
        """Return the list of active guards, reading the guards file on
        first use.

        Invalid guard names found in the file are reported with a warning
        and skipped rather than aborting.
        """
        if self.active_guards is None:
            self.active_guards = []
            try:
                guards = self.opener(self.guards_path).read().split()
            except IOError, err:
                # a missing guards file just means "no active guards"
                if err.errno != errno.ENOENT: raise
                guards = []
            for i, guard in enumerate(guards):
                bad = self.check_guard(guard)
                if bad:
                    self.ui.warn('%s:%d: %s\n' %
                                 (self.join(self.guards_path), i + 1, bad))
                else:
                    self.active_guards.append(guard)
        return self.active_guards
164
164
165 def set_guards(self, idx, guards):
165 def set_guards(self, idx, guards):
166 for g in guards:
166 for g in guards:
167 if len(g) < 2:
167 if len(g) < 2:
168 raise util.Abort(_('guard %r too short') % g)
168 raise util.Abort(_('guard %r too short') % g)
169 if g[0] not in '-+':
169 if g[0] not in '-+':
170 raise util.Abort(_('guard %r starts with invalid char') % g)
170 raise util.Abort(_('guard %r starts with invalid char') % g)
171 bad = self.check_guard(g[1:])
171 bad = self.check_guard(g[1:])
172 if bad:
172 if bad:
173 raise util.Abort(bad)
173 raise util.Abort(bad)
174 drop = self.guard_re.sub('', self.full_series[idx])
174 drop = self.guard_re.sub('', self.full_series[idx])
175 self.full_series[idx] = drop + ''.join([' #' + g for g in guards])
175 self.full_series[idx] = drop + ''.join([' #' + g for g in guards])
176 self.parse_series()
176 self.parse_series()
177 self.series_dirty = True
177 self.series_dirty = True
178
178
179 def pushable(self, idx):
179 def pushable(self, idx):
180 if isinstance(idx, str):
180 if isinstance(idx, str):
181 idx = self.series.index(idx)
181 idx = self.series.index(idx)
182 patchguards = self.series_guards[idx]
182 patchguards = self.series_guards[idx]
183 if not patchguards:
183 if not patchguards:
184 return True, None
184 return True, None
185 default = False
185 default = False
186 guards = self.active()
186 guards = self.active()
187 exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
187 exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
188 if exactneg:
188 if exactneg:
189 return False, exactneg[0]
189 return False, exactneg[0]
190 pos = [g for g in patchguards if g[0] == '+']
190 pos = [g for g in patchguards if g[0] == '+']
191 exactpos = [g for g in pos if g[1:] in guards]
191 exactpos = [g for g in pos if g[1:] in guards]
192 if pos:
192 if pos:
193 if exactpos:
193 if exactpos:
194 return True, exactpos[0]
194 return True, exactpos[0]
195 return False, pos
195 return False, pos
196 return True, ''
196 return True, ''
197
197
198 def explain_pushable(self, idx, all_patches=False):
198 def explain_pushable(self, idx, all_patches=False):
199 write = all_patches and self.ui.write or self.ui.warn
199 write = all_patches and self.ui.write or self.ui.warn
200 if all_patches or self.ui.verbose:
200 if all_patches or self.ui.verbose:
201 if isinstance(idx, str):
201 if isinstance(idx, str):
202 idx = self.series.index(idx)
202 idx = self.series.index(idx)
203 pushable, why = self.pushable(idx)
203 pushable, why = self.pushable(idx)
204 if all_patches and pushable:
204 if all_patches and pushable:
205 if why is None:
205 if why is None:
206 write(_('allowing %s - no guards in effect\n') %
206 write(_('allowing %s - no guards in effect\n') %
207 self.series[idx])
207 self.series[idx])
208 else:
208 else:
209 if not why:
209 if not why:
210 write(_('allowing %s - no matching negative guards\n') %
210 write(_('allowing %s - no matching negative guards\n') %
211 self.series[idx])
211 self.series[idx])
212 else:
212 else:
213 write(_('allowing %s - guarded by %r\n') %
213 write(_('allowing %s - guarded by %r\n') %
214 (self.series[idx], why))
214 (self.series[idx], why))
215 if not pushable:
215 if not pushable:
216 if why:
216 if why:
217 write(_('skipping %s - guarded by %r\n') %
217 write(_('skipping %s - guarded by %r\n') %
218 (self.series[idx], why))
218 (self.series[idx], why))
219 else:
219 else:
220 write(_('skipping %s - no matching guards\n') %
220 write(_('skipping %s - no matching guards\n') %
221 self.series[idx])
221 self.series[idx])
222
222
223 def save_dirty(self):
223 def save_dirty(self):
224 def write_list(items, path):
224 def write_list(items, path):
225 fp = self.opener(path, 'w')
225 fp = self.opener(path, 'w')
226 for i in items:
226 for i in items:
227 fp.write("%s\n" % i)
227 fp.write("%s\n" % i)
228 fp.close()
228 fp.close()
229 if self.applied_dirty: write_list(map(str, self.applied), self.status_path)
229 if self.applied_dirty: write_list(map(str, self.applied), self.status_path)
230 if self.series_dirty: write_list(self.full_series, self.series_path)
230 if self.series_dirty: write_list(self.full_series, self.series_path)
231 if self.guards_dirty: write_list(self.active_guards, self.guards_path)
231 if self.guards_dirty: write_list(self.active_guards, self.guards_path)
232
232
    def readheaders(self, patch):
        """Parse the header of the patch file *patch*.

        Understands both 'hg export' style headers ('# HG changeset
        patch' with '# User'/'# Date' lines) and mail-style headers
        (Subject:/From:).  Returns (message, comments, user, date,
        patchfound): the commit message lines, all pre-diff lines, the
        author, the date, and whether an actual diff body was seen.
        """
        def eatdiff(lines):
            # drop trailing diff-header noise from the collected lines
            while lines:
                l = lines[-1]
                if (l.startswith("diff -") or
                    l.startswith("Index:") or
                    l.startswith("===========")):
                    del lines[-1]
                else:
                    break
        def eatempty(lines):
            # drop trailing blank lines
            while lines:
                l = lines[-1]
                if re.match('\s*$', l):
                    del lines[-1]
                else:
                    break

        pf = self.join(patch)
        message = []
        comments = []
        user = None
        date = None
        format = None      # parser state: None/"hgpatch"/"tag"/"tagdone"
        subject = None
        diffstart = 0      # 0: no diff yet, 1: saw '---', 2: diff confirmed

        for line in file(pf):
            line = line.rstrip()
            if line.startswith('diff --git'):
                diffstart = 2
                break
            if diffstart:
                # a '--- ' line is only a diff start if '+++ ' follows
                if line.startswith('+++ '):
                    diffstart = 2
                break
            if line.startswith("--- "):
                diffstart = 1
                continue
            elif format == "hgpatch":
                # parse values when importing the result of an hg export
                if line.startswith("# User "):
                    user = line[7:]
                elif line.startswith("# Date "):
                    date = line[7:]
                elif not line.startswith("# ") and line:
                    message.append(line)
                    format = None
            elif line == '# HG changeset patch':
                format = "hgpatch"
            elif (format != "tagdone" and (line.startswith("Subject: ") or
                                           line.startswith("subject: "))):
                subject = line[9:]
                format = "tag"
            elif (format != "tagdone" and (line.startswith("From: ") or
                                           line.startswith("from: "))):
                user = line[6:]
                format = "tag"
            elif format == "tag" and line == "":
                # when looking for tags (subject: from: etc) they
                # end once you find a blank line in the source
                format = "tagdone"
            elif message or line:
                message.append(line)
            comments.append(line)

        eatdiff(message)
        eatdiff(comments)
        eatempty(message)
        eatempty(comments)

        # make sure message isn't empty
        if format and format.startswith("tag") and subject:
            message.insert(0, "")
            message.insert(0, subject)
        return (message, comments, user, date, diffstart > 1)
309
309
    def removeundo(self, repo):
        """Delete the repository's undo (rollback) file, if any.

        mq rewrites history, so hg's rollback data would be dangerously
        stale after a queue operation; failure to remove it is only a
        warning, not fatal.
        """
        undo = repo.sjoin('undo')
        if not os.path.exists(undo):
            return
        try:
            os.unlink(undo)
        except OSError, inst:
            self.ui.warn('error removing undo: %s\n' % str(inst))
318
318
319 def printdiff(self, repo, node1, node2=None, files=None,
319 def printdiff(self, repo, node1, node2=None, files=None,
320 fp=None, changes=None, opts={}):
320 fp=None, changes=None, opts={}):
321 fns, matchfn, anypats = cmdutil.matchpats(repo, files, opts)
321 fns, matchfn, anypats = cmdutil.matchpats(repo, files, opts)
322
322
323 patch.diff(repo, node1, node2, fns, match=matchfn,
323 patch.diff(repo, node1, node2, fns, match=matchfn,
324 fp=fp, changes=changes, opts=self.diffopts())
324 fp=fp, changes=changes, opts=self.diffopts())
325
325
326 def mergeone(self, repo, mergeq, head, patch, rev):
326 def mergeone(self, repo, mergeq, head, patch, rev):
327 # first try just applying the patch
327 # first try just applying the patch
328 (err, n) = self.apply(repo, [ patch ], update_status=False,
328 (err, n) = self.apply(repo, [ patch ], update_status=False,
329 strict=True, merge=rev)
329 strict=True, merge=rev)
330
330
331 if err == 0:
331 if err == 0:
332 return (err, n)
332 return (err, n)
333
333
334 if n is None:
334 if n is None:
335 raise util.Abort(_("apply failed for patch %s") % patch)
335 raise util.Abort(_("apply failed for patch %s") % patch)
336
336
337 self.ui.warn("patch didn't work out, merging %s\n" % patch)
337 self.ui.warn("patch didn't work out, merging %s\n" % patch)
338
338
339 # apply failed, strip away that rev and merge.
339 # apply failed, strip away that rev and merge.
340 hg.clean(repo, head)
340 hg.clean(repo, head)
341 self.strip(repo, n, update=False, backup='strip')
341 self.strip(repo, n, update=False, backup='strip')
342
342
343 ctx = repo.changectx(rev)
343 ctx = repo.changectx(rev)
344 ret = hg.merge(repo, rev)
344 ret = hg.merge(repo, rev)
345 if ret:
345 if ret:
346 raise util.Abort(_("update returned %d") % ret)
346 raise util.Abort(_("update returned %d") % ret)
347 n = repo.commit(None, ctx.description(), ctx.user(), force=1)
347 n = repo.commit(None, ctx.description(), ctx.user(), force=1)
348 if n == None:
348 if n == None:
349 raise util.Abort(_("repo commit failed"))
349 raise util.Abort(_("repo commit failed"))
350 try:
350 try:
351 message, comments, user, date, patchfound = mergeq.readheaders(patch)
351 message, comments, user, date, patchfound = mergeq.readheaders(patch)
352 except:
352 except:
353 raise util.Abort(_("unable to read %s") % patch)
353 raise util.Abort(_("unable to read %s") % patch)
354
354
355 patchf = self.opener(patch, "w")
355 patchf = self.opener(patch, "w")
356 if comments:
356 if comments:
357 comments = "\n".join(comments) + '\n\n'
357 comments = "\n".join(comments) + '\n\n'
358 patchf.write(comments)
358 patchf.write(comments)
359 self.printdiff(repo, head, n, fp=patchf)
359 self.printdiff(repo, head, n, fp=patchf)
360 patchf.close()
360 patchf.close()
361 self.removeundo(repo)
361 self.removeundo(repo)
362 return (0, n)
362 return (0, n)
363
363
    def qparents(self, repo, rev=None):
        """Return the parent node that belongs to the patch queue.

        With no *rev*: the working directory's first parent when it is
        not a merge; for a merge, the topmost applied patch's node (None
        if no patches are applied).  With a *rev* that is a merge,
        prefer whichever parent is an applied patch; otherwise return
        the first parent.
        """
        if rev is None:
            (p1, p2) = repo.dirstate.parents()
            if p2 == revlog.nullid:
                return p1
            if len(self.applied) == 0:
                return None
            return revlog.bin(self.applied[-1].rev)
        pp = repo.changelog.parents(rev)
        if pp[1] != revlog.nullid:
            # merge: statusentry.rev holds hex strings, so compare in hex
            arevs = [ x.rev for x in self.applied ]
            p0 = revlog.hex(pp[0])
            p1 = revlog.hex(pp[1])
            if p0 in arevs:
                return pp[0]
            if p1 in arevs:
                return pp[1]
        return pp[0]
382
382
    def mergepatch(self, repo, mergeq, series):
        """Pull the patches named in *series* from *mergeq* into this
        queue by merging each one in turn.  Returns (err, head)."""
        if len(self.applied) == 0:
            # each of the patches merged in will have two parents.  This
            # can confuse the qrefresh, qdiff, and strip code because it
            # needs to know which parent is actually in the patch queue.
            # so, we insert a merge marker with only one parent.  This way
            # the first patch in the queue is never a merge patch
            #
            pname = ".hg.patches.merge.marker"
            n = repo.commit(None, '[mq]: merge marker', user=None, force=1)
            self.removeundo(repo)
            self.applied.append(statusentry(revlog.hex(n), pname))
            self.applied_dirty = 1

        head = self.qparents(repo)

        for patch in series:
            patch = mergeq.lookup(patch, strict=True)
            if not patch:
                self.ui.warn("patch %s does not exist\n" % patch)
                return (1, None)
            pushable, reason = self.pushable(patch)
            if not pushable:
                # guarded patches are skipped, not errors
                self.explain_pushable(patch, all_patches=True)
                continue
            info = mergeq.isapplied(patch)
            if not info:
                self.ui.warn("patch %s is not applied\n" % patch)
                return (1, None)
            rev = revlog.bin(info[1])
            (err, head) = self.mergeone(repo, mergeq, head, patch, rev)
            if head:
                self.applied.append(statusentry(revlog.hex(head), patch))
                self.applied_dirty = 1
            if err:
                return (err, head)
        self.save_dirty()
        return (0, head)
421
421
    def patch(self, repo, patchfile):
        '''Apply patchfile to the working directory.
        patchfile: file name of patch

        Returns (success, files, fuzz): whether the patch applied, the
        dict of files it touched, and whether any hunk applied with
        fuzz.  On failure the exception text is only shown in verbose
        mode.'''
        files = {}
        try:
            fuzz = patch.patch(patchfile, self.ui, strip=1, cwd=repo.root,
                               files=files)
        except Exception, inst:
            self.ui.note(str(inst) + '\n')
            if not self.ui.verbose:
                self.ui.warn("patch failed, unable to continue (try -v)\n")
            return (False, files, False)

        return (True, files, fuzz)
436
436
437 def apply(self, repo, series, list=False, update_status=True,
437 def apply(self, repo, series, list=False, update_status=True,
438 strict=False, patchdir=None, merge=None, all_files={}):
438 strict=False, patchdir=None, merge=None, all_files={}):
439 wlock = lock = tr = None
439 wlock = lock = tr = None
440 try:
440 try:
441 wlock = repo.wlock()
441 wlock = repo.wlock()
442 lock = repo.lock()
442 lock = repo.lock()
443 tr = repo.transaction()
443 tr = repo.transaction()
444 try:
444 try:
445 ret = self._apply(repo, series, list, update_status,
445 ret = self._apply(repo, series, list, update_status,
446 strict, patchdir, merge, all_files=all_files)
446 strict, patchdir, merge, all_files=all_files)
447 tr.close()
447 tr.close()
448 self.save_dirty()
448 self.save_dirty()
449 return ret
449 return ret
450 except:
450 except:
451 try:
451 try:
452 tr.abort()
452 tr.abort()
453 finally:
453 finally:
454 repo.invalidate()
454 repo.invalidate()
455 repo.dirstate.invalidate()
455 repo.dirstate.invalidate()
456 raise
456 raise
457 finally:
457 finally:
458 del tr, lock, wlock
458 del tr, lock, wlock
459 self.removeundo(repo)
459 self.removeundo(repo)
460
460
461 def _apply(self, repo, series, list=False, update_status=True,
461 def _apply(self, repo, series, list=False, update_status=True,
462 strict=False, patchdir=None, merge=None, all_files={}):
462 strict=False, patchdir=None, merge=None, all_files={}):
463 # TODO unify with commands.py
463 # TODO unify with commands.py
464 if not patchdir:
464 if not patchdir:
465 patchdir = self.path
465 patchdir = self.path
466 err = 0
466 err = 0
467 n = None
467 n = None
468 for patchname in series:
468 for patchname in series:
469 pushable, reason = self.pushable(patchname)
469 pushable, reason = self.pushable(patchname)
470 if not pushable:
470 if not pushable:
471 self.explain_pushable(patchname, all_patches=True)
471 self.explain_pushable(patchname, all_patches=True)
472 continue
472 continue
473 self.ui.warn("applying %s\n" % patchname)
473 self.ui.warn("applying %s\n" % patchname)
474 pf = os.path.join(patchdir, patchname)
474 pf = os.path.join(patchdir, patchname)
475
475
476 try:
476 try:
477 message, comments, user, date, patchfound = self.readheaders(patchname)
477 message, comments, user, date, patchfound = self.readheaders(patchname)
478 except:
478 except:
479 self.ui.warn("Unable to read %s\n" % patchname)
479 self.ui.warn("Unable to read %s\n" % patchname)
480 err = 1
480 err = 1
481 break
481 break
482
482
483 if not message:
483 if not message:
484 message = "imported patch %s\n" % patchname
484 message = "imported patch %s\n" % patchname
485 else:
485 else:
486 if list:
486 if list:
487 message.append("\nimported patch %s" % patchname)
487 message.append("\nimported patch %s" % patchname)
488 message = '\n'.join(message)
488 message = '\n'.join(message)
489
489
490 (patcherr, files, fuzz) = self.patch(repo, pf)
490 (patcherr, files, fuzz) = self.patch(repo, pf)
491 all_files.update(files)
491 all_files.update(files)
492 patcherr = not patcherr
492 patcherr = not patcherr
493
493
494 if merge and files:
494 if merge and files:
495 # Mark as removed/merged and update dirstate parent info
495 # Mark as removed/merged and update dirstate parent info
496 removed = []
496 removed = []
497 merged = []
497 merged = []
498 for f in files:
498 for f in files:
499 if os.path.exists(repo.wjoin(f)):
499 if os.path.exists(repo.wjoin(f)):
500 merged.append(f)
500 merged.append(f)
501 else:
501 else:
502 removed.append(f)
502 removed.append(f)
503 for f in removed:
503 for f in removed:
504 repo.dirstate.remove(f)
504 repo.dirstate.remove(f)
505 for f in merged:
505 for f in merged:
506 repo.dirstate.merge(f)
506 repo.dirstate.merge(f)
507 p1, p2 = repo.dirstate.parents()
507 p1, p2 = repo.dirstate.parents()
508 repo.dirstate.setparents(p1, merge)
508 repo.dirstate.setparents(p1, merge)
509 files = patch.updatedir(self.ui, repo, files)
509 files = patch.updatedir(self.ui, repo, files)
510 n = repo.commit(files, message, user, date, force=1)
510 n = repo.commit(files, message, user, date, force=1)
511
511
512 if n == None:
512 if n == None:
513 raise util.Abort(_("repo commit failed"))
513 raise util.Abort(_("repo commit failed"))
514
514
515 if update_status:
515 if update_status:
516 self.applied.append(statusentry(revlog.hex(n), patchname))
516 self.applied.append(statusentry(revlog.hex(n), patchname))
517
517
518 if patcherr:
518 if patcherr:
519 if not patchfound:
519 if not patchfound:
520 self.ui.warn("patch %s is empty\n" % patchname)
520 self.ui.warn("patch %s is empty\n" % patchname)
521 err = 0
521 err = 0
522 else:
522 else:
523 self.ui.warn("patch failed, rejects left in working dir\n")
523 self.ui.warn("patch failed, rejects left in working dir\n")
524 err = 1
524 err = 1
525 break
525 break
526
526
527 if fuzz and strict:
527 if fuzz and strict:
528 self.ui.warn("fuzz found when applying patch, stopping\n")
528 self.ui.warn("fuzz found when applying patch, stopping\n")
529 err = 1
529 err = 1
530 break
530 break
531 return (err, n)
531 return (err, n)
532
532
    def delete(self, repo, patches, opts):
        """qdelete: remove the named (unapplied) patches, and/or the
        applied patches corresponding to the revisions in opts['rev'].

        Revisions given with --rev must form a contiguous prefix of the
        applied stack.  Unless --keep, the patch files themselves are
        removed (through the patch repo when one exists).
        """
        if not patches and not opts.get('rev'):
            raise util.Abort(_('qdelete requires at least one revision or '
                               'patch name'))

        realpatches = []
        for patch in patches:
            patch = self.lookup(patch, strict=True)
            info = self.isapplied(patch)
            if info:
                raise util.Abort(_("cannot delete applied patch %s") % patch)
            if patch not in self.series:
                raise util.Abort(_("patch %s not in series file") % patch)
            realpatches.append(patch)

        appliedbase = 0
        if opts.get('rev'):
            if not self.applied:
                raise util.Abort(_('no patches applied'))
            revs = cmdutil.revrange(repo, opts['rev'])
            if len(revs) > 1 and revs[0] > revs[1]:
                revs.reverse()
            for rev in revs:
                # each rev must match the next entry of the applied
                # stack, bottom-up
                if appliedbase >= len(self.applied):
                    raise util.Abort(_("revision %d is not managed") % rev)

                base = revlog.bin(self.applied[appliedbase].rev)
                node = repo.changelog.node(rev)
                if node != base:
                    raise util.Abort(_("cannot delete revision %d above "
                                       "applied patches") % rev)
                realpatches.append(self.applied[appliedbase].name)
                appliedbase += 1

        if not opts.get('keep'):
            r = self.qrepo()
            if r:
                r.remove(realpatches, True)
            else:
                for p in realpatches:
                    os.unlink(self.join(p))

        if appliedbase:
            del self.applied[:appliedbase]
            self.applied_dirty = 1
        indices = [self.find_series(p) for p in realpatches]
        indices.sort()
        # delete from the end so earlier indices stay valid
        for i in indices[-1::-1]:
            del self.full_series[i]
        self.parse_series()
        self.series_dirty = 1
584
584
585 def check_toppatch(self, repo):
585 def check_toppatch(self, repo):
586 if len(self.applied) > 0:
586 if len(self.applied) > 0:
587 top = revlog.bin(self.applied[-1].rev)
587 top = revlog.bin(self.applied[-1].rev)
588 pp = repo.dirstate.parents()
588 pp = repo.dirstate.parents()
589 if top not in pp:
589 if top not in pp:
590 raise util.Abort(_("working directory revision is not qtip"))
590 raise util.Abort(_("working directory revision is not qtip"))
591 return top
591 return top
592 return None
592 return None
593 def check_localchanges(self, repo, force=False, refresh=True):
593 def check_localchanges(self, repo, force=False, refresh=True):
594 m, a, r, d = repo.status()[:4]
594 m, a, r, d = repo.status()[:4]
595 if m or a or r or d:
595 if m or a or r or d:
596 if not force:
596 if not force:
597 if refresh:
597 if refresh:
598 raise util.Abort(_("local changes found, refresh first"))
598 raise util.Abort(_("local changes found, refresh first"))
599 else:
599 else:
600 raise util.Abort(_("local changes found"))
600 raise util.Abort(_("local changes found"))
601 return m, a, r, d
601 return m, a, r, d
602
602
603 _reserved = ('series', 'status', 'guards')
603 _reserved = ('series', 'status', 'guards')
604 def check_reserved_name(self, name):
604 def check_reserved_name(self, name):
605 if (name in self._reserved or name.startswith('.hg')
605 if (name in self._reserved or name.startswith('.hg')
606 or name.startswith('.mq')):
606 or name.startswith('.mq')):
607 raise util.Abort(_('"%s" cannot be used as the name of a patch')
607 raise util.Abort(_('"%s" cannot be used as the name of a patch')
608 % name)
608 % name)
609
609
610 def new(self, repo, patch, *pats, **opts):
610 def new(self, repo, patch, *pats, **opts):
611 msg = opts.get('msg')
611 msg = opts.get('msg')
612 force = opts.get('force')
612 force = opts.get('force')
613 user = opts.get('user')
613 user = opts.get('user')
614 date = opts.get('date')
614 date = opts.get('date')
615 self.check_reserved_name(patch)
615 self.check_reserved_name(patch)
616 if os.path.exists(self.join(patch)):
616 if os.path.exists(self.join(patch)):
617 raise util.Abort(_('patch "%s" already exists') % patch)
617 raise util.Abort(_('patch "%s" already exists') % patch)
618 if opts.get('include') or opts.get('exclude') or pats:
618 if opts.get('include') or opts.get('exclude') or pats:
619 fns, match, anypats = cmdutil.matchpats(repo, pats, opts)
619 fns, match, anypats = cmdutil.matchpats(repo, pats, opts)
620 m, a, r, d = repo.status(files=fns, match=match)[:4]
620 m, a, r, d = repo.status(files=fns, match=match)[:4]
621 else:
621 else:
622 m, a, r, d = self.check_localchanges(repo, force)
622 m, a, r, d = self.check_localchanges(repo, force)
623 fns, match, anypats = cmdutil.matchpats(repo, m + a + r)
623 fns, match, anypats = cmdutil.matchpats(repo, m + a + r)
624 commitfiles = m + a + r
624 commitfiles = m + a + r
625 self.check_toppatch(repo)
625 self.check_toppatch(repo)
626 wlock = repo.wlock()
626 wlock = repo.wlock()
627 try:
627 try:
628 insert = self.full_series_end()
628 insert = self.full_series_end()
629 commitmsg = msg and msg or ("[mq]: %s" % patch)
629 commitmsg = msg and msg or ("[mq]: %s" % patch)
630 n = repo.commit(commitfiles, commitmsg, user, date, match=match, force=True)
630 n = repo.commit(commitfiles, commitmsg, user, date, match=match, force=True)
631 if n == None:
631 if n == None:
632 raise util.Abort(_("repo commit failed"))
632 raise util.Abort(_("repo commit failed"))
633 self.full_series[insert:insert] = [patch]
633 self.full_series[insert:insert] = [patch]
634 self.applied.append(statusentry(revlog.hex(n), patch))
634 self.applied.append(statusentry(revlog.hex(n), patch))
635 self.parse_series()
635 self.parse_series()
636 self.series_dirty = 1
636 self.series_dirty = 1
637 self.applied_dirty = 1
637 self.applied_dirty = 1
638 p = self.opener(patch, "w")
638 p = self.opener(patch, "w")
639 if date:
639 if date:
640 p.write("# HG changeset patch\n")
640 p.write("# HG changeset patch\n")
641 if user:
641 if user:
642 p.write("# User " + user + "\n")
642 p.write("# User " + user + "\n")
643 p.write("# Date " + date + "\n")
643 p.write("# Date " + date + "\n")
644 p.write("\n")
644 p.write("\n")
645 elif user:
645 elif user:
646 p.write("From: " + user + "\n")
646 p.write("From: " + user + "\n")
647 p.write("\n")
647 p.write("\n")
648 if msg:
648 if msg:
649 msg = msg + "\n"
649 msg = msg + "\n"
650 p.write(msg)
650 p.write(msg)
651 p.close()
651 p.close()
652 wlock = None
652 wlock = None
653 r = self.qrepo()
653 r = self.qrepo()
654 if r: r.add([patch])
654 if r: r.add([patch])
655 if commitfiles:
655 if commitfiles:
656 self.refresh(repo, short=True, git=opts.get('git'))
656 self.refresh(repo, short=True, git=opts.get('git'))
657 self.removeundo(repo)
657 self.removeundo(repo)
658 finally:
658 finally:
659 del wlock
659 del wlock
660
660
    def strip(self, repo, rev, update=True, backup="all"):
        """Remove rev and its descendants from the repository.

        When update is true, the working directory is first moved to a
        safe parent of rev (aborting on local changes).  backup is
        passed through to repair.strip.  Takes both the working dir
        lock and the repo lock, wlock first.
        """
        wlock = lock = None
        try:
            wlock = repo.wlock()
            lock = repo.lock()

            if update:
                self.check_localchanges(repo, refresh=False)
                urev = self.qparents(repo, rev)
                hg.clean(repo, urev)
                repo.dirstate.write()

            self.removeundo(repo)
            repair.strip(self.ui, repo, rev, backup)
            # strip may have unbundled a set of backed up revisions after
            # the actual strip
            self.removeundo(repo)
        finally:
            del lock, wlock
680
680
681 def isapplied(self, patch):
681 def isapplied(self, patch):
682 """returns (index, rev, patch)"""
682 """returns (index, rev, patch)"""
683 for i in xrange(len(self.applied)):
683 for i in xrange(len(self.applied)):
684 a = self.applied[i]
684 a = self.applied[i]
685 if a.name == patch:
685 if a.name == patch:
686 return (i, a.rev, a.name)
686 return (i, a.rev, a.name)
687 return None
687 return None
688
688
689 # if the exact patch name does not exist, we try a few
689 # if the exact patch name does not exist, we try a few
690 # variations. If strict is passed, we try only #1
690 # variations. If strict is passed, we try only #1
691 #
691 #
692 # 1) a number to indicate an offset in the series file
692 # 1) a number to indicate an offset in the series file
693 # 2) a unique substring of the patch name was given
693 # 2) a unique substring of the patch name was given
694 # 3) patchname[-+]num to indicate an offset in the series file
694 # 3) patchname[-+]num to indicate an offset in the series file
695 def lookup(self, patch, strict=False):
695 def lookup(self, patch, strict=False):
696 patch = patch and str(patch)
696 patch = patch and str(patch)
697
697
698 def partial_name(s):
698 def partial_name(s):
699 if s in self.series:
699 if s in self.series:
700 return s
700 return s
701 matches = [x for x in self.series if s in x]
701 matches = [x for x in self.series if s in x]
702 if len(matches) > 1:
702 if len(matches) > 1:
703 self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
703 self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
704 for m in matches:
704 for m in matches:
705 self.ui.warn(' %s\n' % m)
705 self.ui.warn(' %s\n' % m)
706 return None
706 return None
707 if matches:
707 if matches:
708 return matches[0]
708 return matches[0]
709 if len(self.series) > 0 and len(self.applied) > 0:
709 if len(self.series) > 0 and len(self.applied) > 0:
710 if s == 'qtip':
710 if s == 'qtip':
711 return self.series[self.series_end(True)-1]
711 return self.series[self.series_end(True)-1]
712 if s == 'qbase':
712 if s == 'qbase':
713 return self.series[0]
713 return self.series[0]
714 return None
714 return None
715 if patch == None:
715 if patch == None:
716 return None
716 return None
717
717
718 # we don't want to return a partial match until we make
718 # we don't want to return a partial match until we make
719 # sure the file name passed in does not exist (checked below)
719 # sure the file name passed in does not exist (checked below)
720 res = partial_name(patch)
720 res = partial_name(patch)
721 if res and res == patch:
721 if res and res == patch:
722 return res
722 return res
723
723
724 if not os.path.isfile(self.join(patch)):
724 if not os.path.isfile(self.join(patch)):
725 try:
725 try:
726 sno = int(patch)
726 sno = int(patch)
727 except(ValueError, OverflowError):
727 except(ValueError, OverflowError):
728 pass
728 pass
729 else:
729 else:
730 if sno < len(self.series):
730 if sno < len(self.series):
731 return self.series[sno]
731 return self.series[sno]
732 if not strict:
732 if not strict:
733 # return any partial match made above
733 # return any partial match made above
734 if res:
734 if res:
735 return res
735 return res
736 minus = patch.rfind('-')
736 minus = patch.rfind('-')
737 if minus >= 0:
737 if minus >= 0:
738 res = partial_name(patch[:minus])
738 res = partial_name(patch[:minus])
739 if res:
739 if res:
740 i = self.series.index(res)
740 i = self.series.index(res)
741 try:
741 try:
742 off = int(patch[minus+1:] or 1)
742 off = int(patch[minus+1:] or 1)
743 except(ValueError, OverflowError):
743 except(ValueError, OverflowError):
744 pass
744 pass
745 else:
745 else:
746 if i - off >= 0:
746 if i - off >= 0:
747 return self.series[i - off]
747 return self.series[i - off]
748 plus = patch.rfind('+')
748 plus = patch.rfind('+')
749 if plus >= 0:
749 if plus >= 0:
750 res = partial_name(patch[:plus])
750 res = partial_name(patch[:plus])
751 if res:
751 if res:
752 i = self.series.index(res)
752 i = self.series.index(res)
753 try:
753 try:
754 off = int(patch[plus+1:] or 1)
754 off = int(patch[plus+1:] or 1)
755 except(ValueError, OverflowError):
755 except(ValueError, OverflowError):
756 pass
756 pass
757 else:
757 else:
758 if i + off < len(self.series):
758 if i + off < len(self.series):
759 return self.series[i + off]
759 return self.series[i + off]
760 raise util.Abort(_("patch %s not in series") % patch)
760 raise util.Abort(_("patch %s not in series") % patch)
761
761
    def push(self, repo, patch=None, force=False, list=False,
             mergeq=None):
        """Apply the next patch in the series, or all patches up to
        *patch*, onto the working directory (qpush).

        Returns the apply status (0 on success, non-zero on errors,
        1 when the series is already fully applied).  On an unexpected
        exception the working directory is cleaned up and the exception
        re-raised.
        """
        wlock = repo.wlock()
        try:
            patch = self.lookup(patch)
            # Suppose our series file is: A B C and the current 'top'
            # patch is B. qpush C should be performed (moving forward)
            # qpush B is a NOP (no change) qpush A is an error (can't
            # go backwards with qpush)
            if patch:
                info = self.isapplied(patch)
                if info:
                    if info[0] < len(self.applied) - 1:
                        raise util.Abort(
                            _("cannot push to a previous patch: %s") % patch)
                    if info[0] < len(self.series) - 1:
                        self.ui.warn(
                            _('qpush: %s is already at the top\n') % patch)
                    else:
                        self.ui.warn(_('all patches are currently applied\n'))
                    return

            # Following the above example, starting at 'top' of B:
            # qpush should be performed (pushes C), but a subsequent
            # qpush without an argument is an error (nothing to
            # apply). This allows a loop of "...while hg qpush..." to
            # work as it detects an error when done
            if self.series_end() == len(self.series):
                self.ui.warn(_('patch series already fully applied\n'))
                return 1
            if not force:
                self.check_localchanges(repo)

            self.applied_dirty = 1;
            start = self.series_end()
            if start > 0:
                self.check_toppatch(repo)
            if not patch:
                patch = self.series[start]
                end = start + 1
            else:
                end = self.series.index(patch, start) + 1
            # the slice of series entries that will actually be applied
            s = self.series[start:end]
            all_files = {}
            try:
                if mergeq:
                    ret = self.mergepatch(repo, mergeq, s)
                else:
                    ret = self.apply(repo, s, list, all_files=all_files)
            except:
                # roll the working dir back to its pre-push state, then
                # re-raise so the caller still sees the failure
                self.ui.warn(_('cleaning up working directory...'))
                node = repo.dirstate.parents()[0]
                hg.revert(repo, node, None)
                unknown = repo.status()[4]
                # only remove unknown files that we know we touched or
                # created while patching
                for f in unknown:
                    if f in all_files:
                        util.unlink(repo.wjoin(f))
                self.ui.warn(_('done\n'))
                raise
            top = self.applied[-1].name
            if ret[0]:
                self.ui.write(
                    "Errors during apply, please fix and refresh %s\n" % top)
            else:
                self.ui.write("Now at: %s\n" % top)
            return ret[0]
        finally:
            del wlock
832
832
    def pop(self, repo, patch=None, force=False, update=True, all=False):
        """Unapply patches down to (and including) *patch*, or the top
        patch when no name is given; all=True pops the whole stack.

        With update=True (the default) the working directory is moved
        back to the parent of the popped range via a simplified in-place
        update.  Raises util.Abort on unmanaged revisions or local
        changes (unless force).
        """
        # restore one file from revision rev into the working dir
        def getfile(f, rev, flags):
            t = repo.file(f).read(rev)
            repo.wwrite(f, t, flags)

        wlock = repo.wlock()
        try:
            if patch:
                # index, rev, patch
                info = self.isapplied(patch)
                if not info:
                    patch = self.lookup(patch)
                    info = self.isapplied(patch)
                    if not info:
                        raise util.Abort(_("patch %s is not applied") % patch)

            if len(self.applied) == 0:
                # Allow qpop -a to work repeatedly,
                # but not qpop without an argument
                self.ui.warn(_("no patches applied\n"))
                return not all

            if not update:
                # even with --no-update we must update the dirstate if
                # one of its parents is about to be stripped
                parents = repo.dirstate.parents()
                rr = [ revlog.bin(x.rev) for x in self.applied ]
                for p in parents:
                    if p in rr:
                        self.ui.warn("qpop: forcing dirstate update\n")
                        update = True

            if not force and update:
                self.check_localchanges(repo)

            self.applied_dirty = 1;
            end = len(self.applied)
            if not patch:
                if all:
                    popi = 0
                else:
                    popi = len(self.applied) - 1
            else:
                popi = info[0] + 1
                if popi >= end:
                    self.ui.warn("qpop: %s is already at the top\n" % patch)
                    return
            # (index, rev, name) of the first patch to be popped
            info = [ popi ] + [self.applied[popi].rev, self.applied[popi].name]

            start = info[0]
            rev = revlog.bin(info[1])

            if update:
                top = self.check_toppatch(repo)

            if repo.changelog.heads(rev) != [revlog.bin(self.applied[-1].rev)]:
                raise util.Abort("popping would remove a revision not "
                                 "managed by this patch queue")

            # we know there are no local changes, so we can make a simplified
            # form of hg.update.
            if update:
                qp = self.qparents(repo, rev)
                changes = repo.changelog.read(qp)
                mmap = repo.manifest.read(changes[0])
                m, a, r, d, u = repo.status(qp, top)[:5]
                if d:
                    raise util.Abort("deletions found between repo revs")
                for f in m:
                    getfile(f, mmap[f], mmap.flags(f))
                for f in r:
                    getfile(f, mmap[f], mmap.flags(f))
                for f in m + r:
                    repo.dirstate.normal(f)
                for f in a:
                    # files added by the popped patches disappear from
                    # the working dir (and their now-empty directories)
                    try:
                        os.unlink(repo.wjoin(f))
                    except OSError, e:
                        if e.errno != errno.ENOENT:
                            raise
                    try: os.removedirs(os.path.dirname(repo.wjoin(f)))
                    except: pass
                    repo.dirstate.forget(f)
                repo.dirstate.setparents(qp, revlog.nullid)
            del self.applied[start:end]
            self.strip(repo, rev, update=False, backup='strip')
            if len(self.applied):
                self.ui.write("Now at: %s\n" % self.applied[-1].name)
            else:
                self.ui.write("Patch queue now empty\n")
        finally:
            del wlock
923
923
924 def diff(self, repo, pats, opts):
924 def diff(self, repo, pats, opts):
925 top = self.check_toppatch(repo)
925 top = self.check_toppatch(repo)
926 if not top:
926 if not top:
927 self.ui.write("No patches applied\n")
927 self.ui.write("No patches applied\n")
928 return
928 return
929 qp = self.qparents(repo, top)
929 qp = self.qparents(repo, top)
930 if opts.get('git'):
930 if opts.get('git'):
931 self.diffopts().git = True
931 self.diffopts().git = True
932 self.printdiff(repo, qp, files=pats, opts=opts)
932 self.printdiff(repo, qp, files=pats, opts=opts)
933
933
934 def refresh(self, repo, pats=None, **opts):
934 def refresh(self, repo, pats=None, **opts):
935 if len(self.applied) == 0:
935 if len(self.applied) == 0:
936 self.ui.write("No patches applied\n")
936 self.ui.write("No patches applied\n")
937 return 1
937 return 1
938 wlock = repo.wlock()
938 wlock = repo.wlock()
939 try:
939 try:
940 self.check_toppatch(repo)
940 self.check_toppatch(repo)
941 (top, patchfn) = (self.applied[-1].rev, self.applied[-1].name)
941 (top, patchfn) = (self.applied[-1].rev, self.applied[-1].name)
942 top = revlog.bin(top)
942 top = revlog.bin(top)
943 if repo.changelog.heads(top) != [top]:
943 if repo.changelog.heads(top) != [top]:
944 raise util.Abort("cannot refresh a revision with children")
944 raise util.Abort("cannot refresh a revision with children")
945 cparents = repo.changelog.parents(top)
945 cparents = repo.changelog.parents(top)
946 patchparent = self.qparents(repo, top)
946 patchparent = self.qparents(repo, top)
947 message, comments, user, date, patchfound = self.readheaders(patchfn)
947 message, comments, user, date, patchfound = self.readheaders(patchfn)
948
948
949 patchf = self.opener(patchfn, 'r+')
949 patchf = self.opener(patchfn, 'r+')
950
950
951 # if the patch was a git patch, refresh it as a git patch
951 # if the patch was a git patch, refresh it as a git patch
952 for line in patchf:
952 for line in patchf:
953 if line.startswith('diff --git'):
953 if line.startswith('diff --git'):
954 self.diffopts().git = True
954 self.diffopts().git = True
955 break
955 break
956
956
957 msg = opts.get('msg', '').rstrip()
957 msg = opts.get('msg', '').rstrip()
958 if msg and comments:
958 if msg and comments:
959 # Remove existing message, keeping the rest of the comments
959 # Remove existing message, keeping the rest of the comments
960 # fields.
960 # fields.
961 # If comments contains 'subject: ', message will prepend
961 # If comments contains 'subject: ', message will prepend
962 # the field and a blank line.
962 # the field and a blank line.
963 if message:
963 if message:
964 subj = 'subject: ' + message[0].lower()
964 subj = 'subject: ' + message[0].lower()
965 for i in xrange(len(comments)):
965 for i in xrange(len(comments)):
966 if subj == comments[i].lower():
966 if subj == comments[i].lower():
967 del comments[i]
967 del comments[i]
968 message = message[2:]
968 message = message[2:]
969 break
969 break
970 ci = 0
970 ci = 0
971 for mi in xrange(len(message)):
971 for mi in xrange(len(message)):
972 while message[mi] != comments[ci]:
972 while message[mi] != comments[ci]:
973 ci += 1
973 ci += 1
974 del comments[ci]
974 del comments[ci]
975
975
976 def setheaderfield(comments, prefixes, new):
976 def setheaderfield(comments, prefixes, new):
977 # Update all references to a field in the patch header.
977 # Update all references to a field in the patch header.
978 # If none found, add it email style.
978 # If none found, add it email style.
979 res = False
979 res = False
980 for prefix in prefixes:
980 for prefix in prefixes:
981 for i in xrange(len(comments)):
981 for i in xrange(len(comments)):
982 if comments[i].startswith(prefix):
982 if comments[i].startswith(prefix):
983 comments[i] = prefix + new
983 comments[i] = prefix + new
984 res = True
984 res = True
985 break
985 break
986 return res
986 return res
987
987
988 newuser = opts.get('user')
988 newuser = opts.get('user')
989 if newuser:
989 if newuser:
990 if not setheaderfield(comments, ['From: ', '# User '], newuser):
990 if not setheaderfield(comments, ['From: ', '# User '], newuser):
991 try:
991 try:
992 patchheaderat = comments.index('# HG changeset patch')
992 patchheaderat = comments.index('# HG changeset patch')
993 comments.insert(patchheaderat + 1,'# User ' + newuser)
993 comments.insert(patchheaderat + 1,'# User ' + newuser)
994 except ValueError:
994 except ValueError:
995 comments = ['From: ' + newuser, ''] + comments
995 comments = ['From: ' + newuser, ''] + comments
996 user = newuser
996 user = newuser
997
997
998 newdate = opts.get('date')
998 newdate = opts.get('date')
999 if newdate:
999 if newdate:
1000 if setheaderfield(comments, ['# Date '], newdate):
1000 if setheaderfield(comments, ['# Date '], newdate):
1001 date = newdate
1001 date = newdate
1002
1002
1003 if msg:
1003 if msg:
1004 comments.append(msg)
1004 comments.append(msg)
1005
1005
1006 patchf.seek(0)
1006 patchf.seek(0)
1007 patchf.truncate()
1007 patchf.truncate()
1008
1008
1009 if comments:
1009 if comments:
1010 comments = "\n".join(comments) + '\n\n'
1010 comments = "\n".join(comments) + '\n\n'
1011 patchf.write(comments)
1011 patchf.write(comments)
1012
1012
1013 if opts.get('git'):
1013 if opts.get('git'):
1014 self.diffopts().git = True
1014 self.diffopts().git = True
1015 fns, matchfn, anypats = cmdutil.matchpats(repo, pats, opts)
1015 fns, matchfn, anypats = cmdutil.matchpats(repo, pats, opts)
1016 tip = repo.changelog.tip()
1016 tip = repo.changelog.tip()
1017 if top == tip:
1017 if top == tip:
1018 # if the top of our patch queue is also the tip, there is an
1018 # if the top of our patch queue is also the tip, there is an
1019 # optimization here. We update the dirstate in place and strip
1019 # optimization here. We update the dirstate in place and strip
1020 # off the tip commit. Then just commit the current directory
1020 # off the tip commit. Then just commit the current directory
1021 # tree. We can also send repo.commit the list of files
1021 # tree. We can also send repo.commit the list of files
1022 # changed to speed up the diff
1022 # changed to speed up the diff
1023 #
1023 #
1024 # in short mode, we only diff the files included in the
1024 # in short mode, we only diff the files included in the
1025 # patch already
1025 # patch already
1026 #
1026 #
1027 # this should really read:
1027 # this should really read:
1028 # mm, dd, aa, aa2, uu = repo.status(tip, patchparent)[:5]
1028 # mm, dd, aa, aa2, uu = repo.status(tip, patchparent)[:5]
1029 # but we do it backwards to take advantage of manifest/chlog
1029 # but we do it backwards to take advantage of manifest/chlog
1030 # caching against the next repo.status call
1030 # caching against the next repo.status call
1031 #
1031 #
1032 mm, aa, dd, aa2, uu = repo.status(patchparent, tip)[:5]
1032 mm, aa, dd, aa2, uu = repo.status(patchparent, tip)[:5]
1033 changes = repo.changelog.read(tip)
1033 changes = repo.changelog.read(tip)
1034 man = repo.manifest.read(changes[0])
1034 man = repo.manifest.read(changes[0])
1035 aaa = aa[:]
1035 aaa = aa[:]
1036 if opts.get('short'):
1036 if opts.get('short'):
1037 filelist = mm + aa + dd
1037 filelist = mm + aa + dd
1038 match = dict.fromkeys(filelist).__contains__
1038 match = dict.fromkeys(filelist).__contains__
1039 else:
1039 else:
1040 filelist = None
1040 filelist = None
1041 match = util.always
1041 match = util.always
1042 m, a, r, d, u = repo.status(files=filelist, match=match)[:5]
1042 m, a, r, d, u = repo.status(files=filelist, match=match)[:5]
1043
1043
1044 # we might end up with files that were added between
1044 # we might end up with files that were added between
1045 # tip and the dirstate parent, but then changed in the
1045 # tip and the dirstate parent, but then changed in the
1046 # local dirstate. in this case, we want them to only
1046 # local dirstate. in this case, we want them to only
1047 # show up in the added section
1047 # show up in the added section
1048 for x in m:
1048 for x in m:
1049 if x not in aa:
1049 if x not in aa:
1050 mm.append(x)
1050 mm.append(x)
1051 # we might end up with files added by the local dirstate that
1051 # we might end up with files added by the local dirstate that
1052 # were deleted by the patch. In this case, they should only
1052 # were deleted by the patch. In this case, they should only
1053 # show up in the changed section.
1053 # show up in the changed section.
1054 for x in a:
1054 for x in a:
1055 if x in dd:
1055 if x in dd:
1056 del dd[dd.index(x)]
1056 del dd[dd.index(x)]
1057 mm.append(x)
1057 mm.append(x)
1058 else:
1058 else:
1059 aa.append(x)
1059 aa.append(x)
1060 # make sure any files deleted in the local dirstate
1060 # make sure any files deleted in the local dirstate
1061 # are not in the add or change column of the patch
1061 # are not in the add or change column of the patch
1062 forget = []
1062 forget = []
1063 for x in d + r:
1063 for x in d + r:
1064 if x in aa:
1064 if x in aa:
1065 del aa[aa.index(x)]
1065 del aa[aa.index(x)]
1066 forget.append(x)
1066 forget.append(x)
1067 continue
1067 continue
1068 elif x in mm:
1068 elif x in mm:
1069 del mm[mm.index(x)]
1069 del mm[mm.index(x)]
1070 dd.append(x)
1070 dd.append(x)
1071
1071
1072 m = util.unique(mm)
1072 m = util.unique(mm)
1073 r = util.unique(dd)
1073 r = util.unique(dd)
1074 a = util.unique(aa)
1074 a = util.unique(aa)
1075 c = [filter(matchfn, l) for l in (m, a, r, [], u)]
1075 c = [filter(matchfn, l) for l in (m, a, r, [], u)]
1076 filelist = util.unique(c[0] + c[1] + c[2])
1076 filelist = util.unique(c[0] + c[1] + c[2])
1077 patch.diff(repo, patchparent, files=filelist, match=matchfn,
1077 patch.diff(repo, patchparent, files=filelist, match=matchfn,
1078 fp=patchf, changes=c, opts=self.diffopts())
1078 fp=patchf, changes=c, opts=self.diffopts())
1079 patchf.close()
1079 patchf.close()
1080
1080
1081 repo.dirstate.setparents(*cparents)
1081 repo.dirstate.setparents(*cparents)
1082 copies = {}
1082 copies = {}
1083 for dst in a:
1083 for dst in a:
1084 src = repo.dirstate.copied(dst)
1084 src = repo.dirstate.copied(dst)
1085 if src is not None:
1085 if src is not None:
1086 copies.setdefault(src, []).append(dst)
1086 copies.setdefault(src, []).append(dst)
1087 repo.dirstate.add(dst)
1087 repo.dirstate.add(dst)
1088 # remember the copies between patchparent and tip
1088 # remember the copies between patchparent and tip
1089 # this may be slow, so don't do it if we're not tracking copies
1089 # this may be slow, so don't do it if we're not tracking copies
1090 if self.diffopts().git:
1090 if self.diffopts().git:
1091 for dst in aaa:
1091 for dst in aaa:
1092 f = repo.file(dst)
1092 f = repo.file(dst)
1093 src = f.renamed(man[dst])
1093 src = f.renamed(man[dst])
1094 if src:
1094 if src:
1095 copies[src[0]] = copies.get(dst, [])
1095 copies[src[0]] = copies.get(dst, [])
1096 if dst in a:
1096 if dst in a:
1097 copies[src[0]].append(dst)
1097 copies[src[0]].append(dst)
1098 # we can't copy a file created by the patch itself
1098 # we can't copy a file created by the patch itself
1099 if dst in copies:
1099 if dst in copies:
1100 del copies[dst]
1100 del copies[dst]
1101 for src, dsts in copies.iteritems():
1101 for src, dsts in copies.iteritems():
1102 for dst in dsts:
1102 for dst in dsts:
1103 repo.dirstate.copy(src, dst)
1103 repo.dirstate.copy(src, dst)
1104 for f in r:
1104 for f in r:
1105 repo.dirstate.remove(f)
1105 repo.dirstate.remove(f)
1106 # if the patch excludes a modified file, mark that
1106 # if the patch excludes a modified file, mark that
1107 # file with mtime=0 so status can see it.
1107 # file with mtime=0 so status can see it.
1108 mm = []
1108 mm = []
1109 for i in xrange(len(m)-1, -1, -1):
1109 for i in xrange(len(m)-1, -1, -1):
1110 if not matchfn(m[i]):
1110 if not matchfn(m[i]):
1111 mm.append(m[i])
1111 mm.append(m[i])
1112 del m[i]
1112 del m[i]
1113 for f in m:
1113 for f in m:
1114 repo.dirstate.normal(f)
1114 repo.dirstate.normal(f)
1115 for f in mm:
1115 for f in mm:
1116 repo.dirstate.normallookup(f)
1116 repo.dirstate.normallookup(f)
1117 for f in forget:
1117 for f in forget:
1118 repo.dirstate.forget(f)
1118 repo.dirstate.forget(f)
1119
1119
1120 if not msg:
1120 if not msg:
1121 if not message:
1121 if not message:
1122 message = "[mq]: %s\n" % patchfn
1122 message = "[mq]: %s\n" % patchfn
1123 else:
1123 else:
1124 message = "\n".join(message)
1124 message = "\n".join(message)
1125 else:
1125 else:
1126 message = msg
1126 message = msg
1127
1127
1128 if not user:
1128 if not user:
1129 user = changes[1]
1129 user = changes[1]
1130
1130
1131 self.applied.pop()
1131 self.applied.pop()
1132 self.applied_dirty = 1
1132 self.applied_dirty = 1
1133 self.strip(repo, top, update=False,
1133 self.strip(repo, top, update=False,
1134 backup='strip')
1134 backup='strip')
1135 n = repo.commit(filelist, message, user, date, match=matchfn,
1135 n = repo.commit(filelist, message, user, date, match=matchfn,
1136 force=1)
1136 force=1)
1137 self.applied.append(statusentry(revlog.hex(n), patchfn))
1137 self.applied.append(statusentry(revlog.hex(n), patchfn))
1138 self.removeundo(repo)
1138 self.removeundo(repo)
1139 else:
1139 else:
1140 self.printdiff(repo, patchparent, fp=patchf)
1140 self.printdiff(repo, patchparent, fp=patchf)
1141 patchf.close()
1141 patchf.close()
1142 added = repo.status()[1]
1142 added = repo.status()[1]
1143 for a in added:
1143 for a in added:
1144 f = repo.wjoin(a)
1144 f = repo.wjoin(a)
1145 try:
1145 try:
1146 os.unlink(f)
1146 os.unlink(f)
1147 except OSError, e:
1147 except OSError, e:
1148 if e.errno != errno.ENOENT:
1148 if e.errno != errno.ENOENT:
1149 raise
1149 raise
1150 try: os.removedirs(os.path.dirname(f))
1150 try: os.removedirs(os.path.dirname(f))
1151 except: pass
1151 except: pass
1152 # forget the file copies in the dirstate
1152 # forget the file copies in the dirstate
1153 # push should readd the files later on
1153 # push should readd the files later on
1154 repo.dirstate.forget(a)
1154 repo.dirstate.forget(a)
1155 self.pop(repo, force=True)
1155 self.pop(repo, force=True)
1156 self.push(repo, force=True)
1156 self.push(repo, force=True)
1157 finally:
1157 finally:
1158 del wlock
1158 del wlock
1159
1159
1160 def init(self, repo, create=False):
1160 def init(self, repo, create=False):
1161 if not create and os.path.isdir(self.path):
1161 if not create and os.path.isdir(self.path):
1162 raise util.Abort(_("patch queue directory already exists"))
1162 raise util.Abort(_("patch queue directory already exists"))
1163 try:
1163 try:
1164 os.mkdir(self.path)
1164 os.mkdir(self.path)
1165 except OSError, inst:
1165 except OSError, inst:
1166 if inst.errno != errno.EEXIST or not create:
1166 if inst.errno != errno.EEXIST or not create:
1167 raise
1167 raise
1168 if create:
1168 if create:
1169 return self.qrepo(create=True)
1169 return self.qrepo(create=True)
1170
1170
def unapplied(self, repo, patch=None):
    """Return [(index, name)] for pushable patches not yet applied.

    With a patch argument, start just after that patch; otherwise
    start after the last applied patch.  Guarded patches are skipped
    (with an explanation emitted via explain_pushable).
    """
    if patch and patch not in self.series:
        raise util.Abort(_("patch %s is not in series file") % patch)
    if patch:
        first = self.series.index(patch) + 1
    else:
        first = self.series_end()
    result = []
    for i in xrange(first, len(self.series)):
        pushable, reason = self.pushable(i)
        if pushable:
            result.append((i, self.series[i]))
        self.explain_pushable(i)
    return result
1185
1185
def qseries(self, repo, missing=None, start=0, length=None, status=None,
            summary=False):
    """Print the patch series, or (with missing) stray files in the
    patch directory that are not part of the series.

    start/length bound the slice of the series shown; status restricts
    output to entries with that status letter (unless verbose, which
    shows everything with an index/status prefix); summary appends the
    first line of each patch header.
    """
    def displayname(patchname):
        # append ': <first header line>' when a summary was requested
        if not summary:
            return patchname
        headers = self.readheaders(patchname)[0]
        if headers:
            return '%s: %s' % (patchname, headers[0])
        return '%s: ' % patchname

    applied = dict.fromkeys([p.name for p in self.applied])
    if length is None:
        length = len(self.series) - start
    if not missing:
        for idx in xrange(start, start + length):
            name = self.series[idx]
            # A = applied, U = unapplied but pushable, G = guarded
            if name in applied:
                stat = 'A'
            elif self.pushable(idx)[0]:
                stat = 'U'
            else:
                stat = 'G'
            pfx = ''
            if self.ui.verbose:
                pfx = '%d %s ' % (idx, stat)
            elif status and status != stat:
                continue
            self.ui.write('%s%s\n' % (pfx, displayname(name)))
    else:
        # list files in the patch directory that are neither in the
        # series nor part of mq's own bookkeeping
        strays = []
        for root, dirs, files in os.walk(self.path):
            d = root[len(self.path) + 1:]
            for f in files:
                fl = os.path.join(d, f)
                if (fl not in self.series and
                    fl not in (self.status_path, self.series_path,
                               self.guards_path)
                    and not fl.startswith('.')):
                    strays.append(fl)
        strays.sort()
        for fl in strays:
            pfx = self.ui.verbose and 'D ' or ''
            self.ui.write("%s%s\n" % (pfx, displayname(fl)))
1229
1229
def issaveline(self, l):
    """Report whether status entry l is a qsave marker entry.

    Previously returned True or (implicitly) None; an explicit bool is
    clearer and truthiness-compatible for all existing callers.
    """
    return l.name == '.hg.patches.save.line'
1233
1233
def qrepo(self, create=False):
    """Return the versioned queue repository, or None when the patch
    directory is not itself a repository (and create is False)."""
    if not create and not os.path.isdir(self.join(".hg")):
        return None
    return hg.repository(self.ui, path=self.path, create=create)
1237
1237
def restore(self, repo, rev, delete=None, qupdate=None):
    """Rebuild queue state from a qsave changeset rev.

    Parses the 'Patch Data:' and 'Dirstate:' sections out of the
    changeset description, reinstates the series and applied lists,
    optionally strips the save changeset (delete) and updates the
    queue repository to its saved parents (qupdate).

    Returns 1 on failure, None on success.
    """
    desc = repo.changelog.read(rev)[4].strip()
    lines = desc.splitlines()
    datastart = None
    series = []
    applied = []
    qpp = None
    for i in xrange(len(lines)):
        if lines[i] == 'Patch Data:':
            datastart = i + 1
        elif lines[i].startswith('Dirstate:'):
            l = lines[i].rstrip()
            l = l[10:].split(' ')
            qpp = [ hg.bin(x) for x in l ]
        elif datastart is not None:  # fixed: was '!= None'
            l = lines[i].rstrip()
            se = statusentry(l)
            # entries with a revision were applied; the rest are series-only
            if se.rev:
                applied.append(se)
            else:
                series.append(se.name)
    if datastart is None:  # fixed: was '== None'
        self.ui.warn("No saved patch data found\n")
        return 1
    self.ui.warn("restoring status: %s\n" % lines[0])
    self.full_series = series
    self.applied = applied
    self.parse_series()
    self.series_dirty = 1
    self.applied_dirty = 1
    heads = repo.changelog.heads()
    if delete:
        if rev not in heads:
            self.ui.warn("save entry has children, leaving it alone\n")
        else:
            self.ui.warn("removing save entry %s\n" % hg.short(rev))
            pp = repo.dirstate.parents()
            # only update the working dir if it was sitting on the save cset
            update = rev in pp
            self.strip(repo, rev, update=update, backup='strip')
    if qpp:
        self.ui.warn("saved queue repository parents: %s %s\n" %
                     (hg.short(qpp[0]), hg.short(qpp[1])))
        if qupdate:
            self.ui.status(_("queue directory updating\n"))
            r = self.qrepo()
            if not r:
                self.ui.warn("Unable to load queue repository\n")
                return 1
            hg.clean(r, qpp[0])
1293
1293
def save(self, repo, msg=None):
    """Commit the current queue state as a '.hg.patches.save.line' cset.

    Records the applied list, the full series and (when the queue is
    itself versioned) the queue repository's dirstate parents in the
    commit message, so restore() can later rebuild the state.

    Returns 1 on failure, None on success.
    """
    if not self.applied:  # idiom: was 'len(self.applied) == 0'
        self.ui.warn("save: no patches applied, exiting\n")
        return 1
    if self.issaveline(self.applied[-1]):
        self.ui.warn("status is already saved\n")
        return 1

    ar = [ ':' + x for x in self.full_series ]
    if not msg:
        msg = "hg patches saved state"
    else:
        msg = "hg patches: " + msg.rstrip('\r\n')
    r = self.qrepo()
    if r:
        pp = r.dirstate.parents()
        msg += "\nDirstate: %s %s" % (hg.hex(pp[0]), hg.hex(pp[1]))
    msg += "\n\nPatch Data:\n"
    text = msg + "\n".join([str(x) for x in self.applied]) + '\n' + (ar and
               "\n".join(ar) + '\n' or "")
    n = repo.commit(None, text, user=None, force=1)
    if not n:
        self.ui.warn("repo commit failed\n")
        return 1
    self.applied.append(statusentry(revlog.hex(n), '.hg.patches.save.line'))
    self.applied_dirty = 1
    self.removeundo(repo)
1321
1321
def full_series_end(self):
    """Return the index in full_series just past the last applied patch.

    Returns 0 when no patches are applied; when the last applied patch
    cannot be located in full_series, returns len(full_series).
    """
    if not self.applied:  # idiom: was 'len(self.applied) > 0' guard
        return 0
    p = self.applied[-1].name
    end = self.find_series(p)
    if end is None:  # fixed: was '== None'
        return len(self.full_series)
    return end + 1
1330
1330
def series_end(self, all_patches=False):
    """If all_patches is False, return the index of the next pushable patch
    in the series, or the series length. If all_patches is True, return the
    index of the first patch past the last applied one.
    """
    # renamed from 'next' so the builtin is not shadowed
    def nextpushable(start):
        if all_patches:
            return start
        i = start
        while i < len(self.series):
            p, reason = self.pushable(i)
            if p:
                break
            self.explain_pushable(i)
            i += 1
        return i

    if not self.applied:  # idiom: was 'len(self.applied) > 0'
        return nextpushable(0)
    p = self.applied[-1].name
    try:
        end = self.series.index(p)
    except ValueError:
        # last applied patch is no longer in the series
        return 0
    return nextpushable(end + 1)
1356
1356
def appliedname(self, index):
    """Return the name of applied patch index, prefixed with its
    series position when the ui is verbose."""
    pname = self.applied[index].name
    if self.ui.verbose:
        return "%s %s" % (self.series.index(pname), pname)
    return pname
1364
1364
def qimport(self, repo, files, patchname=None, rev=None, existing=None,
            force=None, git=False):
    """Import patches into the queue.

    files are patch files (or '-' for stdin) to copy into the patch
    directory; with existing, they must already live there.  rev is a
    list of revision specs to convert into mq patches instead: the
    revisions must form a linear path to qbase (or to a head when no
    patches are applied).  patchname names a single imported patch,
    force overwrites existing patch files, and git selects git-style
    diffs for --rev imports.
    """
    def checkseries(patchname):
        if patchname in self.series:
            raise util.Abort(_('patch %s is already in the series file')
                             % patchname)
    def checkfile(patchname):
        if not force and os.path.exists(self.join(patchname)):
            raise util.Abort(_('patch "%s" already exists')
                             % patchname)

    if rev is None:
        # fix: len(rev) below used to raise TypeError for the default None
        rev = []
    if rev:
        if files:
            raise util.Abort(_('option "-r" not valid when importing '
                               'files'))
        rev = cmdutil.revrange(repo, rev)
        rev.sort(lambda x, y: cmp(y, x))  # newest first
    if (len(files) > 1 or len(rev) > 1) and patchname:
        raise util.Abort(_('option "-n" not valid when importing multiple '
                           'patches'))
    i = 0
    added = []
    if rev:
        # If mq patches are applied, we can only import revisions
        # that form a linear path to qbase.
        # Otherwise, they should form a linear path to a head.
        heads = repo.changelog.heads(repo.changelog.node(rev[-1]))
        if len(heads) > 1:
            raise util.Abort(_('revision %d is the root of more than one '
                               'branch') % rev[-1])
        if self.applied:
            base = revlog.hex(repo.changelog.node(rev[0]))
            if base in [n.rev for n in self.applied]:
                raise util.Abort(_('revision %d is already managed')
                                 % rev[0])
            if heads != [revlog.bin(self.applied[-1].rev)]:
                raise util.Abort(_('revision %d is not the parent of '
                                   'the queue') % rev[0])
            base = repo.changelog.rev(revlog.bin(self.applied[0].rev))
            lastparent = repo.changelog.parentrevs(base)[0]
        else:
            if heads != [repo.changelog.node(rev[0])]:
                raise util.Abort(_('revision %d has unmanaged children')
                                 % rev[0])
            lastparent = None

        if git:
            self.diffopts().git = True

        for r in rev:
            p1, p2 = repo.changelog.parentrevs(r)
            n = repo.changelog.node(r)
            if p2 != revlog.nullrev:
                raise util.Abort(_('cannot import merge revision %d') % r)
            if lastparent and lastparent != r:
                raise util.Abort(_('revision %d is not the parent of %d')
                                 % (r, lastparent))
            lastparent = p1

            if not patchname:
                patchname = normname('%d.diff' % r)
            self.check_reserved_name(patchname)
            checkseries(patchname)
            checkfile(patchname)
            # imported revisions go at the bottom of the stack
            self.full_series.insert(0, patchname)

            patchf = self.opener(patchname, "w")
            patch.export(repo, [n], fp=patchf, opts=self.diffopts())
            patchf.close()

            se = statusentry(revlog.hex(n), patchname)
            self.applied.insert(0, se)

            added.append(patchname)
            patchname = None
        self.parse_series()
        self.applied_dirty = 1

    for filename in files:
        if existing:
            if filename == '-':
                raise util.Abort(_('-e is incompatible with import from -'))
            if not patchname:
                patchname = normname(filename)
            self.check_reserved_name(patchname)
            if not os.path.isfile(self.join(patchname)):
                raise util.Abort(_("patch %s does not exist") % patchname)
        else:
            try:
                if filename == '-':
                    if not patchname:
                        raise util.Abort(_('need --name to import a patch from -'))
                    text = sys.stdin.read()
                else:
                    text = file(filename, 'rb').read()
            except IOError:
                # fix: report the file we failed to read, not the patch
                # name (patchname may still be unset at this point)
                raise util.Abort(_("unable to read %s") % filename)
            if not patchname:
                patchname = normname(os.path.basename(filename))
            self.check_reserved_name(patchname)
            checkfile(patchname)
            patchf = self.opener(patchname, "w")
            patchf.write(text)
        checkseries(patchname)
        index = self.full_series_end() + i
        self.full_series[index:index] = [patchname]
        self.parse_series()
        self.ui.warn("adding %s to series file\n" % patchname)
        i += 1
        added.append(patchname)
        patchname = None
    self.series_dirty = 1
    qrepo = self.qrepo()
    if qrepo:
        qrepo.add(added)
1480
1480
def delete(ui, repo, *patches, **opts):
    """remove patches from queue

    The patches must not be applied, unless they are arguments to
    the --rev parameter. At least one patch or revision is required.

    With --rev, mq will stop managing the named revisions (converting
    them to regular mercurial changesets). The patches must be applied
    and at the base of the stack. This option is useful when the patches
    have been applied upstream.

    With --keep, the patch files are preserved in the patch directory."""
    # delegate to the queue object, then flush its state to disk
    mq = repo.mq
    mq.delete(repo, patches, opts)
    mq.save_dirty()
    return 0
1497
1497
def applied(ui, repo, patch=None, **opts):
    """print the patches already applied"""
    mq = repo.mq
    # show everything up to (and including) the named patch, or up to
    # the last applied patch when none was given
    if not patch:
        end = mq.series_end(True)
    else:
        if patch not in mq.series:
            raise util.Abort(_("patch %s is not in series file") % patch)
        end = mq.series.index(patch) + 1
    return mq.qseries(repo, length=end, status='A', summary=opts.get('summary'))
1508
1508
def unapplied(ui, repo, patch=None, **opts):
    """print the patches not yet applied"""
    mq = repo.mq
    # start just after the named patch, or after the last applied one
    if not patch:
        first = mq.series_end(True)
    else:
        if patch not in mq.series:
            raise util.Abort(_("patch %s is not in series file") % patch)
        first = mq.series.index(patch) + 1
    mq.qseries(repo, start=first, status='U', summary=opts.get('summary'))
1519
1519
def qimport(ui, repo, *filename, **opts):
    """import a patch

    The patch will have the same name as its source file unless you
    give it a new one with --name.

    You can register an existing patch inside the patch directory
    with the --existing flag.

    With --force, an existing patch of the same name will be overwritten.

    An existing changeset may be placed under mq control with --rev
    (e.g. qimport --rev tip -n patch will place tip under mq control).
    With --git, patches imported with --rev will use the git diff
    format.
    """
    # thin wrapper: forward the command options to queue.qimport and
    # persist the resulting series/status changes
    mq = repo.mq
    mq.qimport(repo, filename, patchname=opts['name'],
               existing=opts['existing'], force=opts['force'],
               rev=opts['rev'], git=opts['git'])
    mq.save_dirty()
    return 0
1542
1542
def init(ui, repo, **opts):
    """init a new queue repository

    The queue repository is unversioned by default. If -c is
    specified, qinit will create a separate nested repository
    for patches (qinit -c may also be run later to convert
    an unversioned patch repository into a versioned one).
    You can use qcommit to commit changes to this queue repository."""
    mq = repo.mq
    r = mq.init(repo, create=opts['create_repo'])
    mq.save_dirty()
    if not r:
        return 0
    # a versioned queue was created: seed it with an ignore file and an
    # (empty) series file, and register both
    if not os.path.exists(r.wjoin('.hgignore')):
        fp = r.wopener('.hgignore', 'w')
        fp.write('^\\.hg\n')
        fp.write('^\\.mq\n')
        fp.write('syntax: glob\n')
        fp.write('status\n')
        fp.write('guards\n')
        fp.close()
    if not os.path.exists(r.wjoin('series')):
        r.wopener('series', 'w').close()
    r.add(['.hgignore', 'series'])
    commands.add(ui, r)
    return 0
1568
1568
def clone(ui, source, dest=None, **opts):
    '''clone main and patch repository at same time

    If source is local, destination will have no patches applied. If
    source is remote, this command can not check if patches are
    applied in source, so cannot guarantee that patches are not
    applied in destination. If you clone remote repository, be sure
    before that it has no patches applied.

    Source patch repository is looked for in <src>/.hg/patches by
    default. Use -p <url> to change.

    The patch directory must be a nested mercurial repository, as
    would be created by qinit -c.
    '''
    def patchdir(repo):
        # Derive the conventional patch-repo location from a repo URL.
        url = repo.url()
        if url.endswith('/'):
            url = url[:-1]
        return url + '/.hg/patches'
    cmdutil.setremoteconfig(ui, opts)
    if dest is None:
        dest = hg.defaultdest(source)
    sr = hg.repository(ui, ui.expandpath(source))
    # The patch repo location may be overridden with -p/--patches.
    patchespath = opts['patches'] or patchdir(sr)
    try:
        pr = hg.repository(ui, patchespath)
    except hg.RepoError:
        raise util.Abort(_('versioned patch repository not found'
                           ' (see qinit -c)'))
    qbase, destrev = None, None
    if sr.local():
        if sr.mq.applied:
            # First applied patch; everything from here up will be
            # stripped from (or not pulled into) the destination.
            qbase = revlog.bin(sr.mq.applied[0].rev)
            if not hg.islocal(dest):
                # Remote dest can't be stripped afterwards, so instead
                # clone only the revisions below qbase: all heads except
                # those reachable only through applied patches, plus
                # qbase's parent.
                heads = dict.fromkeys(sr.heads())
                for h in sr.heads(qbase):
                    del heads[h]
                destrev = heads.keys()
                destrev.append(sr.changelog.parents(qbase)[0])
    ui.note(_('cloning main repo\n'))
    sr, dr = hg.clone(ui, sr.url(), dest,
                      pull=opts['pull'],
                      rev=destrev,
                      update=False,
                      stream=opts['uncompressed'])
    ui.note(_('cloning patch repo\n'))
    spr, dpr = hg.clone(ui, opts['patches'] or patchdir(sr), patchdir(dr),
                        pull=opts['pull'], update=not opts['noupdate'],
                        stream=opts['uncompressed'])
    if dr.local():
        if qbase:
            # Local dest was cloned in full; remove the applied patches.
            ui.note(_('stripping applied patches from destination repo\n'))
            dr.mq.strip(dr, qbase, update=False, backup=None)
        if not opts['noupdate']:
            ui.note(_('updating destination repo\n'))
            hg.update(dr, dr.changelog.tip())
1626
1626
def commit(ui, repo, *pats, **opts):
    """commit changes in the queue repository"""
    # Locate the versioned patch repository (created by qinit -c).
    qrepo = repo.mq.qrepo()
    if not qrepo:
        raise util.Abort('no queue repository')
    # Run the ordinary commit command against the queue repo.
    commands.commit(qrepo.ui, qrepo, *pats, **opts)
1633
1633
def series(ui, repo, **opts):
    """print the entire series file"""
    q = repo.mq
    # qseries does all the formatting work, including -m and -s handling.
    q.qseries(repo, missing=opts['missing'], summary=opts['summary'])
    return 0
1638
1638
def top(ui, repo, **opts):
    """print the name of the current patch"""
    q = repo.mq
    # Index one past the last applied patch; 0 when nothing is applied.
    end = 0
    if q.applied:
        end = q.series_end(True)
    if not end:
        ui.write("No patches applied\n")
        return 1
    return q.qseries(repo, start=end - 1, length=1, status='A',
                     summary=opts.get('summary'))
1649
1649
def next(ui, repo, **opts):
    """print the name of the next patch"""
    q = repo.mq
    pos = q.series_end()
    # series_end() == len(series) means every patch is already applied.
    if pos == len(q.series):
        ui.write("All patches applied\n")
        return 1
    return q.qseries(repo, start=pos, length=1, summary=opts.get('summary'))
1658
1658
def prev(ui, repo, **opts):
    """print the name of the previous patch"""
    q = repo.mq
    napplied = len(q.applied)
    # Guard clauses: need at least two applied patches to have a "previous".
    if not napplied:
        ui.write("No patches applied\n")
        return 1
    if napplied == 1:
        ui.write("Only one patch applied\n")
        return 1
    return q.qseries(repo, start=napplied - 2, length=1, status='A',
                     summary=opts.get('summary'))
1671
1671
def setupheaderopts(ui, opts):
    """default the 'user'/'date' options from --currentuser/--currentdate

    For each of 'user' and 'date': if the explicit option was not given
    but its --current* counterpart was, fill it in with the current
    username or date.

    Fix: the original routed both values through a helper whose argument
    was evaluated eagerly, so ui.username() and util.makedate() ran on
    every call even when their result was discarded — and ui.username()
    can abort when no username is configured.  Evaluate lazily instead.
    """
    if not opts['user'] and opts['currentuser']:
        opts['user'] = ui.username()
    if not opts['date'] and opts['currentdate']:
        opts['date'] = "%d %d" % util.makedate()
1678
1678
def new(ui, repo, patch, *args, **opts):
    """create a new patch

    qnew creates a new patch on top of the currently-applied patch
    (if any). It will refuse to run if there are any outstanding
    changes unless -f is specified, in which case the patch will
    be initialised with them. You may also use -I, -X, and/or a list of
    files after the patch name to add only changes to matching files
    to the new patch, leaving the rest as uncommitted modifications.

    -e, -m or -l set the patch header as well as the commit message.
    If none is specified, the patch header is empty and the
    commit message is '[mq]: PATCH'"""
    q = repo.mq
    # Assemble the commit message from -m/-l, optionally via the editor.
    msg = cmdutil.logmessage(opts)
    if opts['edit']:
        msg = ui.edit(msg, ui.username())
    opts['msg'] = msg
    # Fill in --currentuser/--currentdate defaults before creating.
    setupheaderopts(ui, opts)
    q.new(repo, patch, *args, **opts)
    q.save_dirty()
    return 0
1701
1701
def refresh(ui, repo, *pats, **opts):
    """update the current patch

    If any file patterns are provided, the refreshed patch will contain only
    the modifications that match those patterns; the remaining modifications
    will remain in the working directory.

    hg add/remove/copy/rename work as usual, though you might want to use
    git-style patches (--git or [diff] git=1) to track copies and renames.
    """
    q = repo.mq
    msg = cmdutil.logmessage(opts)
    if opts['edit']:
        # -e: open the current patch header in the editor.
        if not q.applied:
            ui.write(_("No patches applied\n"))
            return 1
        if msg:
            raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
        toppatch = q.applied[-1].name
        (lines, comment, user, date, hasdiff) = q.readheaders(toppatch)
        msg = ui.edit('\n'.join(lines), user or ui.username())
    setupheaderopts(ui, opts)
    ret = q.refresh(repo, pats, msg=msg, **opts)
    q.save_dirty()
    return ret
1727
1727
def diff(ui, repo, *pats, **opts):
    """diff of the current patch"""
    # The queue object knows how to diff against the topmost patch.
    q = repo.mq
    q.diff(repo, pats, opts)
    return 0
1732
1732
def fold(ui, repo, *files, **opts):
    """fold the named patches into the current patch

    Patches must not yet be applied. Each patch will be successively
    applied to the current patch in the order given. If all the
    patches apply successfully, the current patch will be refreshed
    with the new cumulative patch, and the folded patches will
    be deleted. With -k/--keep, the folded patch files will not
    be removed afterwards.

    The header for each folded patch will be concatenated with
    the current patch header, separated by a line of '* * *'."""

    q = repo.mq

    if not files:
        raise util.Abort(_('qfold requires at least one patch name'))
    if not q.check_toppatch(repo):
        raise util.Abort(_('No patches applied'))

    message = cmdutil.logmessage(opts)
    if opts['edit']:
        if message:
            raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))

    parent = q.lookup('qtip')
    patches = []
    messages = []
    # Resolve and validate every named patch before touching anything.
    for f in files:
        p = q.lookup(f)
        if p in patches or p == parent:
            # Fix: the warning previously lacked a trailing newline, so the
            # following output line ran into it (every other ui.warn in this
            # file ends with '\n').
            ui.warn(_('Skipping already folded patch %s\n') % p)
        if q.isapplied(p):
            raise util.Abort(_('qfold cannot fold already applied patch %s') % p)
        patches.append(p)

    # Apply each patch in order on top of the current one.
    for p in patches:
        if not message:
            messages.append(q.readheaders(p)[0])
        pf = q.join(p)
        (patchsuccess, files, fuzz) = q.patch(repo, pf)
        if not patchsuccess:
            raise util.Abort(_('Error folding patch %s') % p)
        patch.updatedir(ui, repo, files)

    if not message:
        # Concatenate the folded patches' headers onto the parent's,
        # separated by '* * *' lines.
        message, comments, user = q.readheaders(parent)[0:3]
        for msg in messages:
            message.append('* * *')
            message.extend(msg)
        message = '\n'.join(message)

    if opts['edit']:
        message = ui.edit(message, user or ui.username())

    # Refresh the current patch with the cumulative result, then delete
    # the folded patch entries (-k keeps the patch files themselves).
    q.refresh(repo, msg=message)
    q.delete(repo, patches, opts)
    q.save_dirty()
1791
1791
def goto(ui, repo, patch, **opts):
    '''push or pop patches until named patch is at top of stack'''
    q = repo.mq
    target = q.lookup(patch)
    # Pop back to the target if it is already applied, push up otherwise.
    if q.isapplied(target):
        mover = q.pop
    else:
        mover = q.push
    status = mover(repo, target, force=opts['force'])
    q.save_dirty()
    return status
1802
1802
def guard(ui, repo, *args, **opts):
    '''set or print guards for a patch

    Guards control whether a patch can be pushed. A patch with no
    guards is always pushed. A patch with a positive guard ("+foo") is
    pushed only if the qselect command has activated it. A patch with
    a negative guard ("-foo") is never pushed if the qselect command
    has activated it.

    With no arguments, print the currently active guards.
    With arguments, set guards for the named patch.

    To set a negative guard "-foo" on topmost patch ("--" is needed so
    hg will not interpret "-foo" as an option):
    hg qguard -- -foo

    To set guards on another patch:
    hg qguard other.patch +2.6.17 -stable
    '''
    def status(idx):
        # Print "<patch>: <guards>" for the series entry at idx.
        guards = q.series_guards[idx] or ['unguarded']
        ui.write('%s: %s\n' % (q.series[idx], ' '.join(guards)))
    q = repo.mq
    patch = None
    args = list(args)
    if opts['list']:
        # -l/--list: report every patch's guards; takes no other input.
        if args or opts['none']:
            raise util.Abort(_('cannot mix -l/--list with options or arguments'))
        for i in xrange(len(q.series)):
            status(i)
        return
    if not args or args[0][0:1] in '-+':
        # No patch named (first arg already looks like a guard):
        # default to the topmost applied patch.
        if not q.applied:
            raise util.Abort(_('no patches applied'))
        patch = q.applied[-1].name
    if patch is None and args[0][0:1] not in '-+':
        # First argument is the patch name; the rest are guards.
        patch = args.pop(0)
    if patch is None:
        raise util.Abort(_('no patch to work with'))
    if args or opts['none']:
        # Guards given (or --none): replace the patch's guard list.
        idx = q.find_series(patch)
        if idx is None:
            raise util.Abort(_('no patch named %s') % patch)
        q.set_guards(idx, args)
        q.save_dirty()
    else:
        # No guards given: just report the named patch's guards.
        status(q.series.index(q.lookup(patch)))
1850
1850
def header(ui, repo, patch=None):
    """Print the header of the topmost or specified patch

    With no PATCH argument, uses the topmost applied patch (returns 1
    with a message when nothing is applied).
    """
    q = repo.mq

    if patch:
        patch = q.lookup(patch)
    else:
        if not q.applied:
            ui.write('No patches applied\n')
            return 1
        patch = q.lookup('qtip')
    # Consistency fix: use the already-bound q rather than reaching
    # through repo.mq a second time.
    message = q.readheaders(patch)[0]

    ui.write('\n'.join(message) + '\n')
1865
1865
def lastsavename(path):
    """find the most recent saved queue for *path*

    Saved queues (as produced by savename) are files named "<path>.<N>"
    in path's directory.  Returns (full filename, N) for the largest N,
    or (None, None) when no save exists.
    """
    (directory, base) = os.path.split(path)
    names = os.listdir(directory)
    # Fix: the original pattern was "%s.([0-9]+)" % base — the dot was a
    # regex wildcard, base was interpolated unescaped, and the pattern was
    # unanchored, so names like "base.1.bak" or "baseX1" matched falsely.
    # Escape the base, match a literal dot, and anchor the numeric suffix.
    namere = re.compile(r"%s\.([0-9]+)$" % re.escape(base))
    maxindex = None
    maxname = None
    for f in names:
        m = namere.match(f)
        if m:
            index = int(m.group(1))
            if maxindex is None or index > maxindex:
                maxindex = index
                maxname = f
    if maxname:
        return (os.path.join(directory, maxname), maxindex)
    return (None, None)
1882
1882
def savename(path):
    """return the next unused save name for *path*, i.e. "<path>.<N+1>"."""
    last, index = lastsavename(path)
    if last is None:
        # No previous save: start numbering at 1.
        index = 0
    return "%s.%d" % (path, index + 1)
1889
1889
def push(ui, repo, patch=None, **opts):
    """push the next patch onto the stack"""
    q = repo.mq
    mergeq = None

    if opts['all']:
        # --all: push every patch, i.e. up to the last one in the series.
        if not q.series:
            ui.warn(_('no patches in series\n'))
            return 0
        patch = q.series[-1]

    if opts['merge']:
        # --merge: merge against the queue named by -n, or the most
        # recently saved one.
        if opts['name']:
            savedpath = opts['name']
        else:
            savedpath, dummy = lastsavename(q.path)
        if not savedpath:
            ui.warn("no saved queues found, please use -n\n")
            return 1
        mergeq = queue(ui, repo.join(""), savedpath)
        ui.warn("merging with queue at: %s\n" % mergeq.path)

    return q.push(repo, patch, force=opts['force'], list=opts['list'],
                  mergeq=mergeq)
1913
1913
def pop(ui, repo, patch=None, **opts):
    """pop the current patch off the stack"""
    if opts['name']:
        # Operate on a named (saved) queue; leave the working dir alone.
        q = queue(ui, repo.join(""), repo.join(opts['name']))
        ui.warn('using patch queue: %s\n' % q.path)
        localupdate = False
    else:
        q = repo.mq
        localupdate = True
    ret = q.pop(repo, patch, force=opts['force'], update=localupdate,
                all=opts['all'])
    q.save_dirty()
    return ret
1927
1927
def rename(ui, repo, patch, name=None, **opts):
    """rename a patch

    With one argument, renames the current patch to PATCH1.
    With two arguments, renames PATCH1 to PATCH2."""

    q = repo.mq

    if not name:
        # One-argument form: the single argument is the new name and the
        # patch to rename is the topmost applied one.
        name = patch
        patch = None

    if patch:
        patch = q.lookup(patch)
    else:
        if not q.applied:
            ui.write(_('No patches applied\n'))
            return
        patch = q.lookup('qtip')
    absdest = q.join(name)
    if os.path.isdir(absdest):
        # Renaming into a directory keeps the original basename.
        name = normname(os.path.join(name, os.path.basename(patch)))
        absdest = q.join(name)
    if os.path.exists(absdest):
        raise util.Abort(_('%s already exists') % absdest)

    if name in q.series:
        raise util.Abort(_('A patch named %s already exists in the series file') % name)

    if ui.verbose:
        ui.write('Renaming %s to %s\n' % (patch, name))
    # Rewrite the series entry, preserving any guards on the old name.
    i = q.find_series(patch)
    guards = q.guard_re.findall(q.full_series[i])
    q.full_series[i] = name + ''.join([' #' + g for g in guards])
    q.parse_series()
    q.series_dirty = 1

    # If the patch is applied, update its status-file entry too.
    info = q.isapplied(patch)
    if info:
        q.applied[info[0]] = statusentry(info[1], name)
        q.applied_dirty = 1

    util.rename(q.join(patch), absdest)
    r = q.qrepo()
    if r:
        # Mirror the rename in the versioned patch repo (qinit -c),
        # recorded as copy + remove so history is preserved.
        wlock = r.wlock()
        try:
            if r.dirstate[name] == 'r':
                r.undelete([name])
            r.copy(patch, name)
            r.remove([patch], False)
        finally:
            del wlock

    q.save_dirty()
1983
1983
def restore(ui, repo, rev, **opts):
    """restore the queue state saved by a rev"""
    q = repo.mq
    # Resolve the revision first, then let the queue rebuild its state.
    node = repo.lookup(rev)
    q.restore(repo, node, delete=opts['delete'],
              qupdate=opts['update'])
    q.save_dirty()
    return 0
1992
1992
def save(ui, repo, **opts):
    """save current queue state

    Records the queue state in the repository; with -c/--copy also
    copies the patch directory aside, and with -e/--empty clears the
    status file afterwards.
    """
    q = repo.mq
    message = cmdutil.logmessage(opts)
    ret = q.save(repo, msg=message)
    if ret:
        return ret
    q.save_dirty()
    if opts['copy']:
        path = q.path
        if opts['name']:
            newpath = os.path.join(q.basepath, opts['name'])
            if os.path.exists(newpath):
                if not os.path.isdir(newpath):
                    raise util.Abort(_('destination %s exists and is not '
                                       'a directory') % newpath)
                if not opts['force']:
                    raise util.Abort(_('destination %s exists, '
                                       'use -f to force') % newpath)
        else:
            newpath = savename(path)
        ui.warn("copy %s to %s\n" % (path, newpath))
        util.copyfiles(path, newpath)
    if opts['empty']:
        try:
            os.unlink(q.join(q.status_path))
        except OSError:
            # Fix: was a bare except, which also swallowed
            # KeyboardInterrupt/SystemExit.  A missing status file is
            # the only condition we deliberately ignore here.
            pass
    return 0
2022
2022
def strip(ui, repo, rev, **opts):
    """strip a revision and all later revs on the same branch"""
    node = repo.lookup(rev)
    # Default is to back up everything; --backup keeps only the stripped
    # changes, --nobackup keeps nothing.
    if opts['backup']:
        backup = 'strip'
    elif opts['nobackup']:
        backup = 'none'
    else:
        backup = 'all'
    # Only update the working dir if it is not already at the null rev.
    update = repo.dirstate.parents()[0] != revlog.nullid
    repo.mq.strip(repo, node, backup=backup, update=update)
    return 0
2034
2034
2035 def select(ui, repo, *args, **opts):
2035 def select(ui, repo, *args, **opts):
2036 '''set or print guarded patches to push
2036 '''set or print guarded patches to push
2037
2037
2038 Use the qguard command to set or print guards on patch, then use
2038 Use the qguard command to set or print guards on patch, then use
2039 qselect to tell mq which guards to use. A patch will be pushed if it
2039 qselect to tell mq which guards to use. A patch will be pushed if it
2040 has no guards or any positive guards match the currently selected guard,
2040 has no guards or any positive guards match the currently selected guard,
2041 but will not be pushed if any negative guards match the current guard.
2041 but will not be pushed if any negative guards match the current guard.
2042 For example:
2042 For example:
2043
2043
2044 qguard foo.patch -stable (negative guard)
2044 qguard foo.patch -stable (negative guard)
2045 qguard bar.patch +stable (positive guard)
2045 qguard bar.patch +stable (positive guard)
2046 qselect stable
2046 qselect stable
2047
2047
2048 This activates the "stable" guard. mq will skip foo.patch (because
2048 This activates the "stable" guard. mq will skip foo.patch (because
2049 it has a negative match) but push bar.patch (because it
2049 it has a negative match) but push bar.patch (because it
2050 has a positive match).
2050 has a positive match).
2051
2051
2052 With no arguments, prints the currently active guards.
2052 With no arguments, prints the currently active guards.
2053 With one argument, sets the active guard.
2053 With one argument, sets the active guard.
2054
2054
2055 Use -n/--none to deactivate guards (no other arguments needed).
2055 Use -n/--none to deactivate guards (no other arguments needed).
2056 When no guards are active, patches with positive guards are skipped
2056 When no guards are active, patches with positive guards are skipped
2057 and patches with negative guards are pushed.
2057 and patches with negative guards are pushed.
2058
2058
2059 qselect can change the guards on applied patches. It does not pop
2059 qselect can change the guards on applied patches. It does not pop
2060 guarded patches by default. Use --pop to pop back to the last applied
2060 guarded patches by default. Use --pop to pop back to the last applied
2061 patch that is not guarded. Use --reapply (which implies --pop) to push
2061 patch that is not guarded. Use --reapply (which implies --pop) to push
2062 back to the current patch afterwards, but skip guarded patches.
2062 back to the current patch afterwards, but skip guarded patches.
2063
2063
2064 Use -s/--series to print a list of all guards in the series file (no
2064 Use -s/--series to print a list of all guards in the series file (no
2065 other arguments needed). Use -v for more information.'''
2065 other arguments needed). Use -v for more information.'''
2066
2066
2067 q = repo.mq
2067 q = repo.mq
2068 guards = q.active()
2068 guards = q.active()
2069 if args or opts['none']:
2069 if args or opts['none']:
2070 old_unapplied = q.unapplied(repo)
2070 old_unapplied = q.unapplied(repo)
2071 old_guarded = [i for i in xrange(len(q.applied)) if
2071 old_guarded = [i for i in xrange(len(q.applied)) if
2072 not q.pushable(i)[0]]
2072 not q.pushable(i)[0]]
2073 q.set_active(args)
2073 q.set_active(args)
2074 q.save_dirty()
2074 q.save_dirty()
2075 if not args:
2075 if not args:
2076 ui.status(_('guards deactivated\n'))
2076 ui.status(_('guards deactivated\n'))
2077 if not opts['pop'] and not opts['reapply']:
2077 if not opts['pop'] and not opts['reapply']:
2078 unapplied = q.unapplied(repo)
2078 unapplied = q.unapplied(repo)
2079 guarded = [i for i in xrange(len(q.applied))
2079 guarded = [i for i in xrange(len(q.applied))
2080 if not q.pushable(i)[0]]
2080 if not q.pushable(i)[0]]
2081 if len(unapplied) != len(old_unapplied):
2081 if len(unapplied) != len(old_unapplied):
2082 ui.status(_('number of unguarded, unapplied patches has '
2082 ui.status(_('number of unguarded, unapplied patches has '
2083 'changed from %d to %d\n') %
2083 'changed from %d to %d\n') %
2084 (len(old_unapplied), len(unapplied)))
2084 (len(old_unapplied), len(unapplied)))
2085 if len(guarded) != len(old_guarded):
2085 if len(guarded) != len(old_guarded):
2086 ui.status(_('number of guarded, applied patches has changed '
2086 ui.status(_('number of guarded, applied patches has changed '
2087 'from %d to %d\n') %
2087 'from %d to %d\n') %
2088 (len(old_guarded), len(guarded)))
2088 (len(old_guarded), len(guarded)))
2089 elif opts['series']:
2089 elif opts['series']:
2090 guards = {}
2090 guards = {}
2091 noguards = 0
2091 noguards = 0
2092 for gs in q.series_guards:
2092 for gs in q.series_guards:
2093 if not gs:
2093 if not gs:
2094 noguards += 1
2094 noguards += 1
2095 for g in gs:
2095 for g in gs:
2096 guards.setdefault(g, 0)
2096 guards.setdefault(g, 0)
2097 guards[g] += 1
2097 guards[g] += 1
2098 if ui.verbose:
2098 if ui.verbose:
2099 guards['NONE'] = noguards
2099 guards['NONE'] = noguards
2100 guards = guards.items()
2100 guards = guards.items()
2101 guards.sort(lambda a, b: cmp(a[0][1:], b[0][1:]))
2101 guards.sort(lambda a, b: cmp(a[0][1:], b[0][1:]))
2102 if guards:
2102 if guards:
2103 ui.note(_('guards in series file:\n'))
2103 ui.note(_('guards in series file:\n'))
2104 for guard, count in guards:
2104 for guard, count in guards:
2105 ui.note('%2d ' % count)
2105 ui.note('%2d ' % count)
2106 ui.write(guard, '\n')
2106 ui.write(guard, '\n')
2107 else:
2107 else:
2108 ui.note(_('no guards in series file\n'))
2108 ui.note(_('no guards in series file\n'))
2109 else:
2109 else:
2110 if guards:
2110 if guards:
2111 ui.note(_('active guards:\n'))
2111 ui.note(_('active guards:\n'))
2112 for g in guards:
2112 for g in guards:
2113 ui.write(g, '\n')
2113 ui.write(g, '\n')
2114 else:
2114 else:
2115 ui.write(_('no active guards\n'))
2115 ui.write(_('no active guards\n'))
2116 reapply = opts['reapply'] and q.applied and q.appliedname(-1)
2116 reapply = opts['reapply'] and q.applied and q.appliedname(-1)
2117 popped = False
2117 popped = False
2118 if opts['pop'] or opts['reapply']:
2118 if opts['pop'] or opts['reapply']:
2119 for i in xrange(len(q.applied)):
2119 for i in xrange(len(q.applied)):
2120 pushable, reason = q.pushable(i)
2120 pushable, reason = q.pushable(i)
2121 if not pushable:
2121 if not pushable:
2122 ui.status(_('popping guarded patches\n'))
2122 ui.status(_('popping guarded patches\n'))
2123 popped = True
2123 popped = True
2124 if i == 0:
2124 if i == 0:
2125 q.pop(repo, all=True)
2125 q.pop(repo, all=True)
2126 else:
2126 else:
2127 q.pop(repo, i-1)
2127 q.pop(repo, i-1)
2128 break
2128 break
2129 if popped:
2129 if popped:
2130 try:
2130 try:
2131 if reapply:
2131 if reapply:
2132 ui.status(_('reapplying unguarded patches\n'))
2132 ui.status(_('reapplying unguarded patches\n'))
2133 q.push(repo, reapply)
2133 q.push(repo, reapply)
2134 finally:
2134 finally:
2135 q.save_dirty()
2135 q.save_dirty()
2136
2136
2137 def reposetup(ui, repo):
2137 def reposetup(ui, repo):
2138 class mqrepo(repo.__class__):
2138 class mqrepo(repo.__class__):
2139 def abort_if_wdir_patched(self, errmsg, force=False):
2139 def abort_if_wdir_patched(self, errmsg, force=False):
2140 if self.mq.applied and not force:
2140 if self.mq.applied and not force:
2141 parent = revlog.hex(self.dirstate.parents()[0])
2141 parent = revlog.hex(self.dirstate.parents()[0])
2142 if parent in [s.rev for s in self.mq.applied]:
2142 if parent in [s.rev for s in self.mq.applied]:
2143 raise util.Abort(errmsg)
2143 raise util.Abort(errmsg)
2144
2144
2145 def commit(self, *args, **opts):
2145 def commit(self, *args, **opts):
2146 if len(args) >= 6:
2146 if len(args) >= 6:
2147 force = args[5]
2147 force = args[5]
2148 else:
2148 else:
2149 force = opts.get('force')
2149 force = opts.get('force')
2150 self.abort_if_wdir_patched(
2150 self.abort_if_wdir_patched(
2151 _('cannot commit over an applied mq patch'),
2151 _('cannot commit over an applied mq patch'),
2152 force)
2152 force)
2153
2153
2154 return super(mqrepo, self).commit(*args, **opts)
2154 return super(mqrepo, self).commit(*args, **opts)
2155
2155
2156 def push(self, remote, force=False, revs=None):
2156 def push(self, remote, force=False, revs=None):
2157 if self.mq.applied and not force and not revs:
2157 if self.mq.applied and not force and not revs:
2158 raise util.Abort(_('source has mq patches applied'))
2158 raise util.Abort(_('source has mq patches applied'))
2159 return super(mqrepo, self).push(remote, force, revs)
2159 return super(mqrepo, self).push(remote, force, revs)
2160
2160
2161 def tags(self):
2161 def tags(self):
2162 if self.tagscache:
2162 if self.tagscache:
2163 return self.tagscache
2163 return self.tagscache
2164
2164
2165 tagscache = super(mqrepo, self).tags()
2165 tagscache = super(mqrepo, self).tags()
2166
2166
2167 q = self.mq
2167 q = self.mq
2168 if not q.applied:
2168 if not q.applied:
2169 return tagscache
2169 return tagscache
2170
2170
2171 mqtags = [(revlog.bin(patch.rev), patch.name) for patch in q.applied]
2171 mqtags = [(revlog.bin(patch.rev), patch.name) for patch in q.applied]
2172
2172
2173 if mqtags[-1][0] not in self.changelog.nodemap:
2173 if mqtags[-1][0] not in self.changelog.nodemap:
2174 self.ui.warn('mq status file refers to unknown node %s\n'
2174 self.ui.warn('mq status file refers to unknown node %s\n'
2175 % revlog.short(mqtags[-1][0]))
2175 % revlog.short(mqtags[-1][0]))
2176 return tagscache
2176 return tagscache
2177
2177
2178 mqtags.append((mqtags[-1][0], 'qtip'))
2178 mqtags.append((mqtags[-1][0], 'qtip'))
2179 mqtags.append((mqtags[0][0], 'qbase'))
2179 mqtags.append((mqtags[0][0], 'qbase'))
2180 mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent'))
2180 mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent'))
2181 for patch in mqtags:
2181 for patch in mqtags:
2182 if patch[1] in tagscache:
2182 if patch[1] in tagscache:
2183 self.ui.warn('Tag %s overrides mq patch of the same name\n' % patch[1])
2183 self.ui.warn('Tag %s overrides mq patch of the same name\n' % patch[1])
2184 else:
2184 else:
2185 tagscache[patch[1]] = patch[0]
2185 tagscache[patch[1]] = patch[0]
2186
2186
2187 return tagscache
2187 return tagscache
2188
2188
2189 def _branchtags(self):
2189 def _branchtags(self, partial, lrev):
2190 q = self.mq
2190 q = self.mq
2191 if not q.applied:
2191 if not q.applied:
2192 return super(mqrepo, self)._branchtags()
2192 return super(mqrepo, self)._branchtags(partial, lrev)
2193
2193
2194 cl = self.changelog
2194 cl = self.changelog
2195 qbasenode = revlog.bin(q.applied[0].rev)
2195 qbasenode = revlog.bin(q.applied[0].rev)
2196 if qbasenode not in cl.nodemap:
2196 if qbasenode not in cl.nodemap:
2197 self.ui.warn('mq status file refers to unknown node %s\n'
2197 self.ui.warn('mq status file refers to unknown node %s\n'
2198 % revlog.short(qbasenode))
2198 % revlog.short(qbasenode))
2199 return super(mqrepo, self)._branchtags()
2199 return super(mqrepo, self)._branchtags(partial, lrev)
2200
2201 self.branchcache = {} # avoid recursion in changectx
2202 partial, last, lrev = self._readbranchcache()
2203
2200
2204 qbase = cl.rev(qbasenode)
2201 qbase = cl.rev(qbasenode)
2205 start = lrev + 1
2202 start = lrev + 1
2206 if start < qbase:
2203 if start < qbase:
2207 # update the cache (excluding the patches) and save it
2204 # update the cache (excluding the patches) and save it
2208 self._updatebranchcache(partial, lrev+1, qbase)
2205 self._updatebranchcache(partial, lrev+1, qbase)
2209 self._writebranchcache(partial, cl.node(qbase-1), qbase-1)
2206 self._writebranchcache(partial, cl.node(qbase-1), qbase-1)
2210 start = qbase
2207 start = qbase
2211 # if start = qbase, the cache is as updated as it should be.
2208 # if start = qbase, the cache is as updated as it should be.
2212 # if start > qbase, the cache includes (part of) the patches.
2209 # if start > qbase, the cache includes (part of) the patches.
2213 # we might as well use it, but we won't save it.
2210 # we might as well use it, but we won't save it.
2214
2211
2215 # update the cache up to the tip
2212 # update the cache up to the tip
2216 self._updatebranchcache(partial, start, cl.count())
2213 self._updatebranchcache(partial, start, cl.count())
2217
2214
2218 return partial
2215 return partial
2219
2216
2220 if repo.local():
2217 if repo.local():
2221 repo.__class__ = mqrepo
2218 repo.__class__ = mqrepo
2222 repo.mq = queue(ui, repo.join(""))
2219 repo.mq = queue(ui, repo.join(""))
2223
2220
2224 seriesopts = [('s', 'summary', None, _('print first line of patch header'))]
2221 seriesopts = [('s', 'summary', None, _('print first line of patch header'))]
2225
2222
2226 headeropts = [
2223 headeropts = [
2227 ('U', 'currentuser', None, _('add "From: <current user>" to patch')),
2224 ('U', 'currentuser', None, _('add "From: <current user>" to patch')),
2228 ('u', 'user', '', _('add "From: <given user>" to patch')),
2225 ('u', 'user', '', _('add "From: <given user>" to patch')),
2229 ('D', 'currentdate', None, _('add "Date: <current date>" to patch')),
2226 ('D', 'currentdate', None, _('add "Date: <current date>" to patch')),
2230 ('d', 'date', '', _('add "Date: <given date>" to patch'))]
2227 ('d', 'date', '', _('add "Date: <given date>" to patch'))]
2231
2228
2232 cmdtable = {
2229 cmdtable = {
2233 "qapplied": (applied, [] + seriesopts, _('hg qapplied [-s] [PATCH]')),
2230 "qapplied": (applied, [] + seriesopts, _('hg qapplied [-s] [PATCH]')),
2234 "qclone":
2231 "qclone":
2235 (clone,
2232 (clone,
2236 [('', 'pull', None, _('use pull protocol to copy metadata')),
2233 [('', 'pull', None, _('use pull protocol to copy metadata')),
2237 ('U', 'noupdate', None, _('do not update the new working directories')),
2234 ('U', 'noupdate', None, _('do not update the new working directories')),
2238 ('', 'uncompressed', None,
2235 ('', 'uncompressed', None,
2239 _('use uncompressed transfer (fast over LAN)')),
2236 _('use uncompressed transfer (fast over LAN)')),
2240 ('p', 'patches', '', _('location of source patch repo')),
2237 ('p', 'patches', '', _('location of source patch repo')),
2241 ] + commands.remoteopts,
2238 ] + commands.remoteopts,
2242 _('hg qclone [OPTION]... SOURCE [DEST]')),
2239 _('hg qclone [OPTION]... SOURCE [DEST]')),
2243 "qcommit|qci":
2240 "qcommit|qci":
2244 (commit,
2241 (commit,
2245 commands.table["^commit|ci"][1],
2242 commands.table["^commit|ci"][1],
2246 _('hg qcommit [OPTION]... [FILE]...')),
2243 _('hg qcommit [OPTION]... [FILE]...')),
2247 "^qdiff":
2244 "^qdiff":
2248 (diff,
2245 (diff,
2249 [('g', 'git', None, _('use git extended diff format')),
2246 [('g', 'git', None, _('use git extended diff format')),
2250 ('U', 'unified', 3, _('number of lines of context to show')),
2247 ('U', 'unified', 3, _('number of lines of context to show')),
2251 ] + commands.walkopts,
2248 ] + commands.walkopts,
2252 _('hg qdiff [-I] [-X] [-U NUM] [-g] [FILE]...')),
2249 _('hg qdiff [-I] [-X] [-U NUM] [-g] [FILE]...')),
2253 "qdelete|qremove|qrm":
2250 "qdelete|qremove|qrm":
2254 (delete,
2251 (delete,
2255 [('k', 'keep', None, _('keep patch file')),
2252 [('k', 'keep', None, _('keep patch file')),
2256 ('r', 'rev', [], _('stop managing a revision'))],
2253 ('r', 'rev', [], _('stop managing a revision'))],
2257 _('hg qdelete [-k] [-r REV]... [PATCH]...')),
2254 _('hg qdelete [-k] [-r REV]... [PATCH]...')),
2258 'qfold':
2255 'qfold':
2259 (fold,
2256 (fold,
2260 [('e', 'edit', None, _('edit patch header')),
2257 [('e', 'edit', None, _('edit patch header')),
2261 ('k', 'keep', None, _('keep folded patch files')),
2258 ('k', 'keep', None, _('keep folded patch files')),
2262 ] + commands.commitopts,
2259 ] + commands.commitopts,
2263 _('hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...')),
2260 _('hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...')),
2264 'qgoto':
2261 'qgoto':
2265 (goto,
2262 (goto,
2266 [('f', 'force', None, _('overwrite any local changes'))],
2263 [('f', 'force', None, _('overwrite any local changes'))],
2267 _('hg qgoto [OPTION]... PATCH')),
2264 _('hg qgoto [OPTION]... PATCH')),
2268 'qguard':
2265 'qguard':
2269 (guard,
2266 (guard,
2270 [('l', 'list', None, _('list all patches and guards')),
2267 [('l', 'list', None, _('list all patches and guards')),
2271 ('n', 'none', None, _('drop all guards'))],
2268 ('n', 'none', None, _('drop all guards'))],
2272 _('hg qguard [-l] [-n] [PATCH] [+GUARD]... [-GUARD]...')),
2269 _('hg qguard [-l] [-n] [PATCH] [+GUARD]... [-GUARD]...')),
2273 'qheader': (header, [], _('hg qheader [PATCH]')),
2270 'qheader': (header, [], _('hg qheader [PATCH]')),
2274 "^qimport":
2271 "^qimport":
2275 (qimport,
2272 (qimport,
2276 [('e', 'existing', None, 'import file in patch dir'),
2273 [('e', 'existing', None, 'import file in patch dir'),
2277 ('n', 'name', '', 'patch file name'),
2274 ('n', 'name', '', 'patch file name'),
2278 ('f', 'force', None, 'overwrite existing files'),
2275 ('f', 'force', None, 'overwrite existing files'),
2279 ('r', 'rev', [], 'place existing revisions under mq control'),
2276 ('r', 'rev', [], 'place existing revisions under mq control'),
2280 ('g', 'git', None, _('use git extended diff format'))],
2277 ('g', 'git', None, _('use git extended diff format'))],
2281 _('hg qimport [-e] [-n NAME] [-f] [-g] [-r REV]... FILE...')),
2278 _('hg qimport [-e] [-n NAME] [-f] [-g] [-r REV]... FILE...')),
2282 "^qinit":
2279 "^qinit":
2283 (init,
2280 (init,
2284 [('c', 'create-repo', None, 'create queue repository')],
2281 [('c', 'create-repo', None, 'create queue repository')],
2285 _('hg qinit [-c]')),
2282 _('hg qinit [-c]')),
2286 "qnew":
2283 "qnew":
2287 (new,
2284 (new,
2288 [('e', 'edit', None, _('edit commit message')),
2285 [('e', 'edit', None, _('edit commit message')),
2289 ('f', 'force', None, _('import uncommitted changes into patch')),
2286 ('f', 'force', None, _('import uncommitted changes into patch')),
2290 ('g', 'git', None, _('use git extended diff format')),
2287 ('g', 'git', None, _('use git extended diff format')),
2291 ] + commands.walkopts + commands.commitopts + headeropts,
2288 ] + commands.walkopts + commands.commitopts + headeropts,
2292 _('hg qnew [-e] [-m TEXT] [-l FILE] [-f] PATCH [FILE]...')),
2289 _('hg qnew [-e] [-m TEXT] [-l FILE] [-f] PATCH [FILE]...')),
2293 "qnext": (next, [] + seriesopts, _('hg qnext [-s]')),
2290 "qnext": (next, [] + seriesopts, _('hg qnext [-s]')),
2294 "qprev": (prev, [] + seriesopts, _('hg qprev [-s]')),
2291 "qprev": (prev, [] + seriesopts, _('hg qprev [-s]')),
2295 "^qpop":
2292 "^qpop":
2296 (pop,
2293 (pop,
2297 [('a', 'all', None, _('pop all patches')),
2294 [('a', 'all', None, _('pop all patches')),
2298 ('n', 'name', '', _('queue name to pop')),
2295 ('n', 'name', '', _('queue name to pop')),
2299 ('f', 'force', None, _('forget any local changes'))],
2296 ('f', 'force', None, _('forget any local changes'))],
2300 _('hg qpop [-a] [-n NAME] [-f] [PATCH | INDEX]')),
2297 _('hg qpop [-a] [-n NAME] [-f] [PATCH | INDEX]')),
2301 "^qpush":
2298 "^qpush":
2302 (push,
2299 (push,
2303 [('f', 'force', None, _('apply if the patch has rejects')),
2300 [('f', 'force', None, _('apply if the patch has rejects')),
2304 ('l', 'list', None, _('list patch name in commit text')),
2301 ('l', 'list', None, _('list patch name in commit text')),
2305 ('a', 'all', None, _('apply all patches')),
2302 ('a', 'all', None, _('apply all patches')),
2306 ('m', 'merge', None, _('merge from another queue')),
2303 ('m', 'merge', None, _('merge from another queue')),
2307 ('n', 'name', '', _('merge queue name'))],
2304 ('n', 'name', '', _('merge queue name'))],
2308 _('hg qpush [-f] [-l] [-a] [-m] [-n NAME] [PATCH | INDEX]')),
2305 _('hg qpush [-f] [-l] [-a] [-m] [-n NAME] [PATCH | INDEX]')),
2309 "^qrefresh":
2306 "^qrefresh":
2310 (refresh,
2307 (refresh,
2311 [('e', 'edit', None, _('edit commit message')),
2308 [('e', 'edit', None, _('edit commit message')),
2312 ('g', 'git', None, _('use git extended diff format')),
2309 ('g', 'git', None, _('use git extended diff format')),
2313 ('s', 'short', None, _('refresh only files already in the patch')),
2310 ('s', 'short', None, _('refresh only files already in the patch')),
2314 ] + commands.walkopts + commands.commitopts + headeropts,
2311 ] + commands.walkopts + commands.commitopts + headeropts,
2315 _('hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...')),
2312 _('hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...')),
2316 'qrename|qmv':
2313 'qrename|qmv':
2317 (rename, [], _('hg qrename PATCH1 [PATCH2]')),
2314 (rename, [], _('hg qrename PATCH1 [PATCH2]')),
2318 "qrestore":
2315 "qrestore":
2319 (restore,
2316 (restore,
2320 [('d', 'delete', None, _('delete save entry')),
2317 [('d', 'delete', None, _('delete save entry')),
2321 ('u', 'update', None, _('update queue working dir'))],
2318 ('u', 'update', None, _('update queue working dir'))],
2322 _('hg qrestore [-d] [-u] REV')),
2319 _('hg qrestore [-d] [-u] REV')),
2323 "qsave":
2320 "qsave":
2324 (save,
2321 (save,
2325 [('c', 'copy', None, _('copy patch directory')),
2322 [('c', 'copy', None, _('copy patch directory')),
2326 ('n', 'name', '', _('copy directory name')),
2323 ('n', 'name', '', _('copy directory name')),
2327 ('e', 'empty', None, _('clear queue status file')),
2324 ('e', 'empty', None, _('clear queue status file')),
2328 ('f', 'force', None, _('force copy'))] + commands.commitopts,
2325 ('f', 'force', None, _('force copy'))] + commands.commitopts,
2329 _('hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]')),
2326 _('hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]')),
2330 "qselect":
2327 "qselect":
2331 (select,
2328 (select,
2332 [('n', 'none', None, _('disable all guards')),
2329 [('n', 'none', None, _('disable all guards')),
2333 ('s', 'series', None, _('list all guards in series file')),
2330 ('s', 'series', None, _('list all guards in series file')),
2334 ('', 'pop', None, _('pop to before first guarded applied patch')),
2331 ('', 'pop', None, _('pop to before first guarded applied patch')),
2335 ('', 'reapply', None, _('pop, then reapply patches'))],
2332 ('', 'reapply', None, _('pop, then reapply patches'))],
2336 _('hg qselect [OPTION]... [GUARD]...')),
2333 _('hg qselect [OPTION]... [GUARD]...')),
2337 "qseries":
2334 "qseries":
2338 (series,
2335 (series,
2339 [('m', 'missing', None, _('print patches not in series')),
2336 [('m', 'missing', None, _('print patches not in series')),
2340 ] + seriesopts,
2337 ] + seriesopts,
2341 _('hg qseries [-ms]')),
2338 _('hg qseries [-ms]')),
2342 "^strip":
2339 "^strip":
2343 (strip,
2340 (strip,
2344 [('f', 'force', None, _('force multi-head removal')),
2341 [('f', 'force', None, _('force multi-head removal')),
2345 ('b', 'backup', None, _('bundle unrelated changesets')),
2342 ('b', 'backup', None, _('bundle unrelated changesets')),
2346 ('n', 'nobackup', None, _('no backups'))],
2343 ('n', 'nobackup', None, _('no backups'))],
2347 _('hg strip [-f] [-b] [-n] REV')),
2344 _('hg strip [-f] [-b] [-n] REV')),
2348 "qtop": (top, [] + seriesopts, _('hg qtop [-s]')),
2345 "qtop": (top, [] + seriesopts, _('hg qtop [-s]')),
2349 "qunapplied": (unapplied, [] + seriesopts, _('hg qunapplied [-s] [PATCH]')),
2346 "qunapplied": (unapplied, [] + seriesopts, _('hg qunapplied [-s] [PATCH]')),
2350 }
2347 }
@@ -1,2106 +1,2105 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import _
9 from i18n import _
10 import repo, changegroup
10 import repo, changegroup
11 import changelog, dirstate, filelog, manifest, context, weakref
11 import changelog, dirstate, filelog, manifest, context, weakref
12 import re, lock, transaction, tempfile, stat, errno, ui
12 import re, lock, transaction, tempfile, stat, errno, ui
13 import os, revlog, time, util, extensions, hook, inspect
13 import os, revlog, time, util, extensions, hook, inspect
14
14
15 class localrepository(repo.repository):
15 class localrepository(repo.repository):
16 capabilities = util.set(('lookup', 'changegroupsubset'))
16 capabilities = util.set(('lookup', 'changegroupsubset'))
17 supported = ('revlogv1', 'store')
17 supported = ('revlogv1', 'store')
18
18
19 def __init__(self, parentui, path=None, create=0):
19 def __init__(self, parentui, path=None, create=0):
20 repo.repository.__init__(self)
20 repo.repository.__init__(self)
21 self.root = os.path.realpath(path)
21 self.root = os.path.realpath(path)
22 self.path = os.path.join(self.root, ".hg")
22 self.path = os.path.join(self.root, ".hg")
23 self.origroot = path
23 self.origroot = path
24 self.opener = util.opener(self.path)
24 self.opener = util.opener(self.path)
25 self.wopener = util.opener(self.root)
25 self.wopener = util.opener(self.root)
26
26
27 if not os.path.isdir(self.path):
27 if not os.path.isdir(self.path):
28 if create:
28 if create:
29 if not os.path.exists(path):
29 if not os.path.exists(path):
30 os.mkdir(path)
30 os.mkdir(path)
31 os.mkdir(self.path)
31 os.mkdir(self.path)
32 requirements = ["revlogv1"]
32 requirements = ["revlogv1"]
33 if parentui.configbool('format', 'usestore', True):
33 if parentui.configbool('format', 'usestore', True):
34 os.mkdir(os.path.join(self.path, "store"))
34 os.mkdir(os.path.join(self.path, "store"))
35 requirements.append("store")
35 requirements.append("store")
36 # create an invalid changelog
36 # create an invalid changelog
37 self.opener("00changelog.i", "a").write(
37 self.opener("00changelog.i", "a").write(
38 '\0\0\0\2' # represents revlogv2
38 '\0\0\0\2' # represents revlogv2
39 ' dummy changelog to prevent using the old repo layout'
39 ' dummy changelog to prevent using the old repo layout'
40 )
40 )
41 reqfile = self.opener("requires", "w")
41 reqfile = self.opener("requires", "w")
42 for r in requirements:
42 for r in requirements:
43 reqfile.write("%s\n" % r)
43 reqfile.write("%s\n" % r)
44 reqfile.close()
44 reqfile.close()
45 else:
45 else:
46 raise repo.RepoError(_("repository %s not found") % path)
46 raise repo.RepoError(_("repository %s not found") % path)
47 elif create:
47 elif create:
48 raise repo.RepoError(_("repository %s already exists") % path)
48 raise repo.RepoError(_("repository %s already exists") % path)
49 else:
49 else:
50 # find requirements
50 # find requirements
51 try:
51 try:
52 requirements = self.opener("requires").read().splitlines()
52 requirements = self.opener("requires").read().splitlines()
53 except IOError, inst:
53 except IOError, inst:
54 if inst.errno != errno.ENOENT:
54 if inst.errno != errno.ENOENT:
55 raise
55 raise
56 requirements = []
56 requirements = []
57 # check them
57 # check them
58 for r in requirements:
58 for r in requirements:
59 if r not in self.supported:
59 if r not in self.supported:
60 raise repo.RepoError(_("requirement '%s' not supported") % r)
60 raise repo.RepoError(_("requirement '%s' not supported") % r)
61
61
62 # setup store
62 # setup store
63 if "store" in requirements:
63 if "store" in requirements:
64 self.encodefn = util.encodefilename
64 self.encodefn = util.encodefilename
65 self.decodefn = util.decodefilename
65 self.decodefn = util.decodefilename
66 self.spath = os.path.join(self.path, "store")
66 self.spath = os.path.join(self.path, "store")
67 else:
67 else:
68 self.encodefn = lambda x: x
68 self.encodefn = lambda x: x
69 self.decodefn = lambda x: x
69 self.decodefn = lambda x: x
70 self.spath = self.path
70 self.spath = self.path
71
71
72 try:
72 try:
73 # files in .hg/ will be created using this mode
73 # files in .hg/ will be created using this mode
74 mode = os.stat(self.spath).st_mode
74 mode = os.stat(self.spath).st_mode
75 # avoid some useless chmods
75 # avoid some useless chmods
76 if (0777 & ~util._umask) == (0777 & mode):
76 if (0777 & ~util._umask) == (0777 & mode):
77 mode = None
77 mode = None
78 except OSError:
78 except OSError:
79 mode = None
79 mode = None
80
80
81 self._createmode = mode
81 self._createmode = mode
82 self.opener.createmode = mode
82 self.opener.createmode = mode
83 sopener = util.opener(self.spath)
83 sopener = util.opener(self.spath)
84 sopener.createmode = mode
84 sopener.createmode = mode
85 self.sopener = util.encodedopener(sopener, self.encodefn)
85 self.sopener = util.encodedopener(sopener, self.encodefn)
86
86
87 self.ui = ui.ui(parentui=parentui)
87 self.ui = ui.ui(parentui=parentui)
88 try:
88 try:
89 self.ui.readconfig(self.join("hgrc"), self.root)
89 self.ui.readconfig(self.join("hgrc"), self.root)
90 extensions.loadall(self.ui)
90 extensions.loadall(self.ui)
91 except IOError:
91 except IOError:
92 pass
92 pass
93
93
94 self.tagscache = None
94 self.tagscache = None
95 self._tagstypecache = None
95 self._tagstypecache = None
96 self.branchcache = None
96 self.branchcache = None
97 self._ubranchcache = None # UTF-8 version of branchcache
97 self._ubranchcache = None # UTF-8 version of branchcache
98 self.nodetagscache = None
98 self.nodetagscache = None
99 self.filterpats = {}
99 self.filterpats = {}
100 self._datafilters = {}
100 self._datafilters = {}
101 self._transref = self._lockref = self._wlockref = None
101 self._transref = self._lockref = self._wlockref = None
102
102
103 def __getattr__(self, name):
103 def __getattr__(self, name):
104 if name == 'changelog':
104 if name == 'changelog':
105 self.changelog = changelog.changelog(self.sopener)
105 self.changelog = changelog.changelog(self.sopener)
106 self.sopener.defversion = self.changelog.version
106 self.sopener.defversion = self.changelog.version
107 return self.changelog
107 return self.changelog
108 if name == 'manifest':
108 if name == 'manifest':
109 self.changelog
109 self.changelog
110 self.manifest = manifest.manifest(self.sopener)
110 self.manifest = manifest.manifest(self.sopener)
111 return self.manifest
111 return self.manifest
112 if name == 'dirstate':
112 if name == 'dirstate':
113 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
113 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
114 return self.dirstate
114 return self.dirstate
115 else:
115 else:
116 raise AttributeError, name
116 raise AttributeError, name
117
117
118 def url(self):
118 def url(self):
119 return 'file:' + self.root
119 return 'file:' + self.root
120
120
121 def hook(self, name, throw=False, **args):
121 def hook(self, name, throw=False, **args):
122 return hook.hook(self.ui, self, name, throw, **args)
122 return hook.hook(self.ui, self, name, throw, **args)
123
123
124 tag_disallowed = ':\r\n'
124 tag_disallowed = ':\r\n'
125
125
126 def _tag(self, name, node, message, local, user, date, parent=None,
126 def _tag(self, name, node, message, local, user, date, parent=None,
127 extra={}):
127 extra={}):
128 use_dirstate = parent is None
128 use_dirstate = parent is None
129
129
130 for c in self.tag_disallowed:
130 for c in self.tag_disallowed:
131 if c in name:
131 if c in name:
132 raise util.Abort(_('%r cannot be used in a tag name') % c)
132 raise util.Abort(_('%r cannot be used in a tag name') % c)
133
133
134 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
134 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
135
135
136 def writetag(fp, name, munge, prevtags):
136 def writetag(fp, name, munge, prevtags):
137 fp.seek(0, 2)
137 fp.seek(0, 2)
138 if prevtags and prevtags[-1] != '\n':
138 if prevtags and prevtags[-1] != '\n':
139 fp.write('\n')
139 fp.write('\n')
140 fp.write('%s %s\n' % (hex(node), munge and munge(name) or name))
140 fp.write('%s %s\n' % (hex(node), munge and munge(name) or name))
141 fp.close()
141 fp.close()
142
142
143 prevtags = ''
143 prevtags = ''
144 if local:
144 if local:
145 try:
145 try:
146 fp = self.opener('localtags', 'r+')
146 fp = self.opener('localtags', 'r+')
147 except IOError, err:
147 except IOError, err:
148 fp = self.opener('localtags', 'a')
148 fp = self.opener('localtags', 'a')
149 else:
149 else:
150 prevtags = fp.read()
150 prevtags = fp.read()
151
151
152 # local tags are stored in the current charset
152 # local tags are stored in the current charset
153 writetag(fp, name, None, prevtags)
153 writetag(fp, name, None, prevtags)
154 self.hook('tag', node=hex(node), tag=name, local=local)
154 self.hook('tag', node=hex(node), tag=name, local=local)
155 return
155 return
156
156
157 if use_dirstate:
157 if use_dirstate:
158 try:
158 try:
159 fp = self.wfile('.hgtags', 'rb+')
159 fp = self.wfile('.hgtags', 'rb+')
160 except IOError, err:
160 except IOError, err:
161 fp = self.wfile('.hgtags', 'ab')
161 fp = self.wfile('.hgtags', 'ab')
162 else:
162 else:
163 prevtags = fp.read()
163 prevtags = fp.read()
164 else:
164 else:
165 try:
165 try:
166 prevtags = self.filectx('.hgtags', parent).data()
166 prevtags = self.filectx('.hgtags', parent).data()
167 except revlog.LookupError:
167 except revlog.LookupError:
168 pass
168 pass
169 fp = self.wfile('.hgtags', 'wb')
169 fp = self.wfile('.hgtags', 'wb')
170 if prevtags:
170 if prevtags:
171 fp.write(prevtags)
171 fp.write(prevtags)
172
172
173 # committed tags are stored in UTF-8
173 # committed tags are stored in UTF-8
174 writetag(fp, name, util.fromlocal, prevtags)
174 writetag(fp, name, util.fromlocal, prevtags)
175
175
176 if use_dirstate and '.hgtags' not in self.dirstate:
176 if use_dirstate and '.hgtags' not in self.dirstate:
177 self.add(['.hgtags'])
177 self.add(['.hgtags'])
178
178
179 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
179 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
180 extra=extra)
180 extra=extra)
181
181
182 self.hook('tag', node=hex(node), tag=name, local=local)
182 self.hook('tag', node=hex(node), tag=name, local=local)
183
183
184 return tagnode
184 return tagnode
185
185
186 def tag(self, name, node, message, local, user, date):
186 def tag(self, name, node, message, local, user, date):
187 '''tag a revision with a symbolic name.
187 '''tag a revision with a symbolic name.
188
188
189 if local is True, the tag is stored in a per-repository file.
189 if local is True, the tag is stored in a per-repository file.
190 otherwise, it is stored in the .hgtags file, and a new
190 otherwise, it is stored in the .hgtags file, and a new
191 changeset is committed with the change.
191 changeset is committed with the change.
192
192
193 keyword arguments:
193 keyword arguments:
194
194
195 local: whether to store tag in non-version-controlled file
195 local: whether to store tag in non-version-controlled file
196 (default False)
196 (default False)
197
197
198 message: commit message to use if committing
198 message: commit message to use if committing
199
199
200 user: name of user to use if committing
200 user: name of user to use if committing
201
201
202 date: date tuple to use if committing'''
202 date: date tuple to use if committing'''
203
203
204 for x in self.status()[:5]:
204 for x in self.status()[:5]:
205 if '.hgtags' in x:
205 if '.hgtags' in x:
206 raise util.Abort(_('working copy of .hgtags is changed '
206 raise util.Abort(_('working copy of .hgtags is changed '
207 '(please commit .hgtags manually)'))
207 '(please commit .hgtags manually)'))
208
208
209
209
210 self._tag(name, node, message, local, user, date)
210 self._tag(name, node, message, local, user, date)
211
211
212 def tags(self):
212 def tags(self):
213 '''return a mapping of tag to node'''
213 '''return a mapping of tag to node'''
214 if self.tagscache:
214 if self.tagscache:
215 return self.tagscache
215 return self.tagscache
216
216
217 globaltags = {}
217 globaltags = {}
218 tagtypes = {}
218 tagtypes = {}
219
219
220 def readtags(lines, fn, tagtype):
220 def readtags(lines, fn, tagtype):
221 filetags = {}
221 filetags = {}
222 count = 0
222 count = 0
223
223
224 def warn(msg):
224 def warn(msg):
225 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
225 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
226
226
227 for l in lines:
227 for l in lines:
228 count += 1
228 count += 1
229 if not l:
229 if not l:
230 continue
230 continue
231 s = l.split(" ", 1)
231 s = l.split(" ", 1)
232 if len(s) != 2:
232 if len(s) != 2:
233 warn(_("cannot parse entry"))
233 warn(_("cannot parse entry"))
234 continue
234 continue
235 node, key = s
235 node, key = s
236 key = util.tolocal(key.strip()) # stored in UTF-8
236 key = util.tolocal(key.strip()) # stored in UTF-8
237 try:
237 try:
238 bin_n = bin(node)
238 bin_n = bin(node)
239 except TypeError:
239 except TypeError:
240 warn(_("node '%s' is not well formed") % node)
240 warn(_("node '%s' is not well formed") % node)
241 continue
241 continue
242 if bin_n not in self.changelog.nodemap:
242 if bin_n not in self.changelog.nodemap:
243 warn(_("tag '%s' refers to unknown node") % key)
243 warn(_("tag '%s' refers to unknown node") % key)
244 continue
244 continue
245
245
246 h = []
246 h = []
247 if key in filetags:
247 if key in filetags:
248 n, h = filetags[key]
248 n, h = filetags[key]
249 h.append(n)
249 h.append(n)
250 filetags[key] = (bin_n, h)
250 filetags[key] = (bin_n, h)
251
251
252 for k, nh in filetags.items():
252 for k, nh in filetags.items():
253 if k not in globaltags:
253 if k not in globaltags:
254 globaltags[k] = nh
254 globaltags[k] = nh
255 tagtypes[k] = tagtype
255 tagtypes[k] = tagtype
256 continue
256 continue
257
257
258 # we prefer the global tag if:
258 # we prefer the global tag if:
259 # it supercedes us OR
259 # it supercedes us OR
260 # mutual supercedes and it has a higher rank
260 # mutual supercedes and it has a higher rank
261 # otherwise we win because we're tip-most
261 # otherwise we win because we're tip-most
262 an, ah = nh
262 an, ah = nh
263 bn, bh = globaltags[k]
263 bn, bh = globaltags[k]
264 if (bn != an and an in bh and
264 if (bn != an and an in bh and
265 (bn not in ah or len(bh) > len(ah))):
265 (bn not in ah or len(bh) > len(ah))):
266 an = bn
266 an = bn
267 ah.extend([n for n in bh if n not in ah])
267 ah.extend([n for n in bh if n not in ah])
268 globaltags[k] = an, ah
268 globaltags[k] = an, ah
269 tagtypes[k] = tagtype
269 tagtypes[k] = tagtype
270
270
271 # read the tags file from each head, ending with the tip
271 # read the tags file from each head, ending with the tip
272 f = None
272 f = None
273 for rev, node, fnode in self._hgtagsnodes():
273 for rev, node, fnode in self._hgtagsnodes():
274 f = (f and f.filectx(fnode) or
274 f = (f and f.filectx(fnode) or
275 self.filectx('.hgtags', fileid=fnode))
275 self.filectx('.hgtags', fileid=fnode))
276 readtags(f.data().splitlines(), f, "global")
276 readtags(f.data().splitlines(), f, "global")
277
277
278 try:
278 try:
279 data = util.fromlocal(self.opener("localtags").read())
279 data = util.fromlocal(self.opener("localtags").read())
280 # localtags are stored in the local character set
280 # localtags are stored in the local character set
281 # while the internal tag table is stored in UTF-8
281 # while the internal tag table is stored in UTF-8
282 readtags(data.splitlines(), "localtags", "local")
282 readtags(data.splitlines(), "localtags", "local")
283 except IOError:
283 except IOError:
284 pass
284 pass
285
285
286 self.tagscache = {}
286 self.tagscache = {}
287 self._tagstypecache = {}
287 self._tagstypecache = {}
288 for k,nh in globaltags.items():
288 for k,nh in globaltags.items():
289 n = nh[0]
289 n = nh[0]
290 if n != nullid:
290 if n != nullid:
291 self.tagscache[k] = n
291 self.tagscache[k] = n
292 self._tagstypecache[k] = tagtypes[k]
292 self._tagstypecache[k] = tagtypes[k]
293 self.tagscache['tip'] = self.changelog.tip()
293 self.tagscache['tip'] = self.changelog.tip()
294
294
295 return self.tagscache
295 return self.tagscache
296
296
297 def tagtype(self, tagname):
297 def tagtype(self, tagname):
298 '''
298 '''
299 return the type of the given tag. result can be:
299 return the type of the given tag. result can be:
300
300
301 'local' : a local tag
301 'local' : a local tag
302 'global' : a global tag
302 'global' : a global tag
303 None : tag does not exist
303 None : tag does not exist
304 '''
304 '''
305
305
306 self.tags()
306 self.tags()
307
307
308 return self._tagstypecache.get(tagname)
308 return self._tagstypecache.get(tagname)
309
309
310 def _hgtagsnodes(self):
310 def _hgtagsnodes(self):
311 heads = self.heads()
311 heads = self.heads()
312 heads.reverse()
312 heads.reverse()
313 last = {}
313 last = {}
314 ret = []
314 ret = []
315 for node in heads:
315 for node in heads:
316 c = self.changectx(node)
316 c = self.changectx(node)
317 rev = c.rev()
317 rev = c.rev()
318 try:
318 try:
319 fnode = c.filenode('.hgtags')
319 fnode = c.filenode('.hgtags')
320 except revlog.LookupError:
320 except revlog.LookupError:
321 continue
321 continue
322 ret.append((rev, node, fnode))
322 ret.append((rev, node, fnode))
323 if fnode in last:
323 if fnode in last:
324 ret[last[fnode]] = None
324 ret[last[fnode]] = None
325 last[fnode] = len(ret) - 1
325 last[fnode] = len(ret) - 1
326 return [item for item in ret if item]
326 return [item for item in ret if item]
327
327
328 def tagslist(self):
328 def tagslist(self):
329 '''return a list of tags ordered by revision'''
329 '''return a list of tags ordered by revision'''
330 l = []
330 l = []
331 for t, n in self.tags().items():
331 for t, n in self.tags().items():
332 try:
332 try:
333 r = self.changelog.rev(n)
333 r = self.changelog.rev(n)
334 except:
334 except:
335 r = -2 # sort to the beginning of the list if unknown
335 r = -2 # sort to the beginning of the list if unknown
336 l.append((r, t, n))
336 l.append((r, t, n))
337 l.sort()
337 l.sort()
338 return [(t, n) for r, t, n in l]
338 return [(t, n) for r, t, n in l]
339
339
340 def nodetags(self, node):
340 def nodetags(self, node):
341 '''return the tags associated with a node'''
341 '''return the tags associated with a node'''
342 if not self.nodetagscache:
342 if not self.nodetagscache:
343 self.nodetagscache = {}
343 self.nodetagscache = {}
344 for t, n in self.tags().items():
344 for t, n in self.tags().items():
345 self.nodetagscache.setdefault(n, []).append(t)
345 self.nodetagscache.setdefault(n, []).append(t)
346 return self.nodetagscache.get(node, [])
346 return self.nodetagscache.get(node, [])
347
347
348 def _branchtags(self):
348 def _branchtags(self, partial, lrev):
349 partial, last, lrev = self._readbranchcache()
350
351 tiprev = self.changelog.count() - 1
349 tiprev = self.changelog.count() - 1
352 if lrev != tiprev:
350 if lrev != tiprev:
353 self._updatebranchcache(partial, lrev+1, tiprev+1)
351 self._updatebranchcache(partial, lrev+1, tiprev+1)
354 self._writebranchcache(partial, self.changelog.tip(), tiprev)
352 self._writebranchcache(partial, self.changelog.tip(), tiprev)
355
353
356 return partial
354 return partial
357
355
358 def branchtags(self):
356 def branchtags(self):
359 if self.branchcache is not None:
357 if self.branchcache is not None:
360 return self.branchcache
358 return self.branchcache
361
359
362 self.branchcache = {} # avoid recursion in changectx
360 self.branchcache = {} # avoid recursion in changectx
363 partial = self._branchtags()
361 partial, last, lrev = self._readbranchcache()
362 self._branchtags(partial, lrev)
364
363
365 # the branch cache is stored on disk as UTF-8, but in the local
364 # the branch cache is stored on disk as UTF-8, but in the local
366 # charset internally
365 # charset internally
367 for k, v in partial.items():
366 for k, v in partial.items():
368 self.branchcache[util.tolocal(k)] = v
367 self.branchcache[util.tolocal(k)] = v
369 self._ubranchcache = partial
368 self._ubranchcache = partial
370 return self.branchcache
369 return self.branchcache
371
370
372 def _readbranchcache(self):
371 def _readbranchcache(self):
373 partial = {}
372 partial = {}
374 try:
373 try:
375 f = self.opener("branch.cache")
374 f = self.opener("branch.cache")
376 lines = f.read().split('\n')
375 lines = f.read().split('\n')
377 f.close()
376 f.close()
378 except (IOError, OSError):
377 except (IOError, OSError):
379 return {}, nullid, nullrev
378 return {}, nullid, nullrev
380
379
381 try:
380 try:
382 last, lrev = lines.pop(0).split(" ", 1)
381 last, lrev = lines.pop(0).split(" ", 1)
383 last, lrev = bin(last), int(lrev)
382 last, lrev = bin(last), int(lrev)
384 if not (lrev < self.changelog.count() and
383 if not (lrev < self.changelog.count() and
385 self.changelog.node(lrev) == last): # sanity check
384 self.changelog.node(lrev) == last): # sanity check
386 # invalidate the cache
385 # invalidate the cache
387 raise ValueError('invalidating branch cache (tip differs)')
386 raise ValueError('invalidating branch cache (tip differs)')
388 for l in lines:
387 for l in lines:
389 if not l: continue
388 if not l: continue
390 node, label = l.split(" ", 1)
389 node, label = l.split(" ", 1)
391 partial[label.strip()] = bin(node)
390 partial[label.strip()] = bin(node)
392 except (KeyboardInterrupt, util.SignalInterrupt):
391 except (KeyboardInterrupt, util.SignalInterrupt):
393 raise
392 raise
394 except Exception, inst:
393 except Exception, inst:
395 if self.ui.debugflag:
394 if self.ui.debugflag:
396 self.ui.warn(str(inst), '\n')
395 self.ui.warn(str(inst), '\n')
397 partial, last, lrev = {}, nullid, nullrev
396 partial, last, lrev = {}, nullid, nullrev
398 return partial, last, lrev
397 return partial, last, lrev
399
398
400 def _writebranchcache(self, branches, tip, tiprev):
399 def _writebranchcache(self, branches, tip, tiprev):
401 try:
400 try:
402 f = self.opener("branch.cache", "w", atomictemp=True)
401 f = self.opener("branch.cache", "w", atomictemp=True)
403 f.write("%s %s\n" % (hex(tip), tiprev))
402 f.write("%s %s\n" % (hex(tip), tiprev))
404 for label, node in branches.iteritems():
403 for label, node in branches.iteritems():
405 f.write("%s %s\n" % (hex(node), label))
404 f.write("%s %s\n" % (hex(node), label))
406 f.rename()
405 f.rename()
407 except (IOError, OSError):
406 except (IOError, OSError):
408 pass
407 pass
409
408
410 def _updatebranchcache(self, partial, start, end):
409 def _updatebranchcache(self, partial, start, end):
411 for r in xrange(start, end):
410 for r in xrange(start, end):
412 c = self.changectx(r)
411 c = self.changectx(r)
413 b = c.branch()
412 b = c.branch()
414 partial[b] = c.node()
413 partial[b] = c.node()
415
414
416 def lookup(self, key):
415 def lookup(self, key):
417 if key == '.':
416 if key == '.':
418 key, second = self.dirstate.parents()
417 key, second = self.dirstate.parents()
419 if key == nullid:
418 if key == nullid:
420 raise repo.RepoError(_("no revision checked out"))
419 raise repo.RepoError(_("no revision checked out"))
421 if second != nullid:
420 if second != nullid:
422 self.ui.warn(_("warning: working directory has two parents, "
421 self.ui.warn(_("warning: working directory has two parents, "
423 "tag '.' uses the first\n"))
422 "tag '.' uses the first\n"))
424 elif key == 'null':
423 elif key == 'null':
425 return nullid
424 return nullid
426 n = self.changelog._match(key)
425 n = self.changelog._match(key)
427 if n:
426 if n:
428 return n
427 return n
429 if key in self.tags():
428 if key in self.tags():
430 return self.tags()[key]
429 return self.tags()[key]
431 if key in self.branchtags():
430 if key in self.branchtags():
432 return self.branchtags()[key]
431 return self.branchtags()[key]
433 n = self.changelog._partialmatch(key)
432 n = self.changelog._partialmatch(key)
434 if n:
433 if n:
435 return n
434 return n
436 try:
435 try:
437 if len(key) == 20:
436 if len(key) == 20:
438 key = hex(key)
437 key = hex(key)
439 except:
438 except:
440 pass
439 pass
441 raise repo.RepoError(_("unknown revision '%s'") % key)
440 raise repo.RepoError(_("unknown revision '%s'") % key)
442
441
443 def dev(self):
442 def dev(self):
444 return os.lstat(self.path).st_dev
443 return os.lstat(self.path).st_dev
445
444
446 def local(self):
445 def local(self):
447 return True
446 return True
448
447
449 def join(self, f):
448 def join(self, f):
450 return os.path.join(self.path, f)
449 return os.path.join(self.path, f)
451
450
452 def sjoin(self, f):
451 def sjoin(self, f):
453 f = self.encodefn(f)
452 f = self.encodefn(f)
454 return os.path.join(self.spath, f)
453 return os.path.join(self.spath, f)
455
454
456 def wjoin(self, f):
455 def wjoin(self, f):
457 return os.path.join(self.root, f)
456 return os.path.join(self.root, f)
458
457
459 def file(self, f):
458 def file(self, f):
460 if f[0] == '/':
459 if f[0] == '/':
461 f = f[1:]
460 f = f[1:]
462 return filelog.filelog(self.sopener, f)
461 return filelog.filelog(self.sopener, f)
463
462
464 def changectx(self, changeid=None):
463 def changectx(self, changeid=None):
465 return context.changectx(self, changeid)
464 return context.changectx(self, changeid)
466
465
467 def workingctx(self):
466 def workingctx(self):
468 return context.workingctx(self)
467 return context.workingctx(self)
469
468
470 def parents(self, changeid=None):
469 def parents(self, changeid=None):
471 '''
470 '''
472 get list of changectxs for parents of changeid or working directory
471 get list of changectxs for parents of changeid or working directory
473 '''
472 '''
474 if changeid is None:
473 if changeid is None:
475 pl = self.dirstate.parents()
474 pl = self.dirstate.parents()
476 else:
475 else:
477 n = self.changelog.lookup(changeid)
476 n = self.changelog.lookup(changeid)
478 pl = self.changelog.parents(n)
477 pl = self.changelog.parents(n)
479 if pl[1] == nullid:
478 if pl[1] == nullid:
480 return [self.changectx(pl[0])]
479 return [self.changectx(pl[0])]
481 return [self.changectx(pl[0]), self.changectx(pl[1])]
480 return [self.changectx(pl[0]), self.changectx(pl[1])]
482
481
483 def filectx(self, path, changeid=None, fileid=None):
482 def filectx(self, path, changeid=None, fileid=None):
484 """changeid can be a changeset revision, node, or tag.
483 """changeid can be a changeset revision, node, or tag.
485 fileid can be a file revision or node."""
484 fileid can be a file revision or node."""
486 return context.filectx(self, path, changeid, fileid)
485 return context.filectx(self, path, changeid, fileid)
487
486
488 def getcwd(self):
487 def getcwd(self):
489 return self.dirstate.getcwd()
488 return self.dirstate.getcwd()
490
489
491 def pathto(self, f, cwd=None):
490 def pathto(self, f, cwd=None):
492 return self.dirstate.pathto(f, cwd)
491 return self.dirstate.pathto(f, cwd)
493
492
494 def wfile(self, f, mode='r'):
493 def wfile(self, f, mode='r'):
495 return self.wopener(f, mode)
494 return self.wopener(f, mode)
496
495
497 def _link(self, f):
496 def _link(self, f):
498 return os.path.islink(self.wjoin(f))
497 return os.path.islink(self.wjoin(f))
499
498
500 def _filter(self, filter, filename, data):
499 def _filter(self, filter, filename, data):
501 if filter not in self.filterpats:
500 if filter not in self.filterpats:
502 l = []
501 l = []
503 for pat, cmd in self.ui.configitems(filter):
502 for pat, cmd in self.ui.configitems(filter):
504 mf = util.matcher(self.root, "", [pat], [], [])[1]
503 mf = util.matcher(self.root, "", [pat], [], [])[1]
505 fn = None
504 fn = None
506 params = cmd
505 params = cmd
507 for name, filterfn in self._datafilters.iteritems():
506 for name, filterfn in self._datafilters.iteritems():
508 if cmd.startswith(name):
507 if cmd.startswith(name):
509 fn = filterfn
508 fn = filterfn
510 params = cmd[len(name):].lstrip()
509 params = cmd[len(name):].lstrip()
511 break
510 break
512 if not fn:
511 if not fn:
513 fn = lambda s, c, **kwargs: util.filter(s, c)
512 fn = lambda s, c, **kwargs: util.filter(s, c)
514 # Wrap old filters not supporting keyword arguments
513 # Wrap old filters not supporting keyword arguments
515 if not inspect.getargspec(fn)[2]:
514 if not inspect.getargspec(fn)[2]:
516 oldfn = fn
515 oldfn = fn
517 fn = lambda s, c, **kwargs: oldfn(s, c)
516 fn = lambda s, c, **kwargs: oldfn(s, c)
518 l.append((mf, fn, params))
517 l.append((mf, fn, params))
519 self.filterpats[filter] = l
518 self.filterpats[filter] = l
520
519
521 for mf, fn, cmd in self.filterpats[filter]:
520 for mf, fn, cmd in self.filterpats[filter]:
522 if mf(filename):
521 if mf(filename):
523 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
522 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
524 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
523 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
525 break
524 break
526
525
527 return data
526 return data
528
527
529 def adddatafilter(self, name, filter):
528 def adddatafilter(self, name, filter):
530 self._datafilters[name] = filter
529 self._datafilters[name] = filter
531
530
532 def wread(self, filename):
531 def wread(self, filename):
533 if self._link(filename):
532 if self._link(filename):
534 data = os.readlink(self.wjoin(filename))
533 data = os.readlink(self.wjoin(filename))
535 else:
534 else:
536 data = self.wopener(filename, 'r').read()
535 data = self.wopener(filename, 'r').read()
537 return self._filter("encode", filename, data)
536 return self._filter("encode", filename, data)
538
537
539 def wwrite(self, filename, data, flags):
538 def wwrite(self, filename, data, flags):
540 data = self._filter("decode", filename, data)
539 data = self._filter("decode", filename, data)
541 try:
540 try:
542 os.unlink(self.wjoin(filename))
541 os.unlink(self.wjoin(filename))
543 except OSError:
542 except OSError:
544 pass
543 pass
545 self.wopener(filename, 'w').write(data)
544 self.wopener(filename, 'w').write(data)
546 util.set_flags(self.wjoin(filename), flags)
545 util.set_flags(self.wjoin(filename), flags)
547
546
548 def wwritedata(self, filename, data):
547 def wwritedata(self, filename, data):
549 return self._filter("decode", filename, data)
548 return self._filter("decode", filename, data)
550
549
551 def transaction(self):
550 def transaction(self):
552 if self._transref and self._transref():
551 if self._transref and self._transref():
553 return self._transref().nest()
552 return self._transref().nest()
554
553
555 # abort here if the journal already exists
554 # abort here if the journal already exists
556 if os.path.exists(self.sjoin("journal")):
555 if os.path.exists(self.sjoin("journal")):
557 raise repo.RepoError(_("journal already exists - run hg recover"))
556 raise repo.RepoError(_("journal already exists - run hg recover"))
558
557
559 # save dirstate for rollback
558 # save dirstate for rollback
560 try:
559 try:
561 ds = self.opener("dirstate").read()
560 ds = self.opener("dirstate").read()
562 except IOError:
561 except IOError:
563 ds = ""
562 ds = ""
564 self.opener("journal.dirstate", "w").write(ds)
563 self.opener("journal.dirstate", "w").write(ds)
565 self.opener("journal.branch", "w").write(self.dirstate.branch())
564 self.opener("journal.branch", "w").write(self.dirstate.branch())
566
565
567 renames = [(self.sjoin("journal"), self.sjoin("undo")),
566 renames = [(self.sjoin("journal"), self.sjoin("undo")),
568 (self.join("journal.dirstate"), self.join("undo.dirstate")),
567 (self.join("journal.dirstate"), self.join("undo.dirstate")),
569 (self.join("journal.branch"), self.join("undo.branch"))]
568 (self.join("journal.branch"), self.join("undo.branch"))]
570 tr = transaction.transaction(self.ui.warn, self.sopener,
569 tr = transaction.transaction(self.ui.warn, self.sopener,
571 self.sjoin("journal"),
570 self.sjoin("journal"),
572 aftertrans(renames),
571 aftertrans(renames),
573 self._createmode)
572 self._createmode)
574 self._transref = weakref.ref(tr)
573 self._transref = weakref.ref(tr)
575 return tr
574 return tr
576
575
577 def recover(self):
576 def recover(self):
578 l = self.lock()
577 l = self.lock()
579 try:
578 try:
580 if os.path.exists(self.sjoin("journal")):
579 if os.path.exists(self.sjoin("journal")):
581 self.ui.status(_("rolling back interrupted transaction\n"))
580 self.ui.status(_("rolling back interrupted transaction\n"))
582 transaction.rollback(self.sopener, self.sjoin("journal"))
581 transaction.rollback(self.sopener, self.sjoin("journal"))
583 self.invalidate()
582 self.invalidate()
584 return True
583 return True
585 else:
584 else:
586 self.ui.warn(_("no interrupted transaction available\n"))
585 self.ui.warn(_("no interrupted transaction available\n"))
587 return False
586 return False
588 finally:
587 finally:
589 del l
588 del l
590
589
591 def rollback(self):
590 def rollback(self):
592 wlock = lock = None
591 wlock = lock = None
593 try:
592 try:
594 wlock = self.wlock()
593 wlock = self.wlock()
595 lock = self.lock()
594 lock = self.lock()
596 if os.path.exists(self.sjoin("undo")):
595 if os.path.exists(self.sjoin("undo")):
597 self.ui.status(_("rolling back last transaction\n"))
596 self.ui.status(_("rolling back last transaction\n"))
598 transaction.rollback(self.sopener, self.sjoin("undo"))
597 transaction.rollback(self.sopener, self.sjoin("undo"))
599 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
598 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
600 try:
599 try:
601 branch = self.opener("undo.branch").read()
600 branch = self.opener("undo.branch").read()
602 self.dirstate.setbranch(branch)
601 self.dirstate.setbranch(branch)
603 except IOError:
602 except IOError:
604 self.ui.warn(_("Named branch could not be reset, "
603 self.ui.warn(_("Named branch could not be reset, "
605 "current branch still is: %s\n")
604 "current branch still is: %s\n")
606 % util.tolocal(self.dirstate.branch()))
605 % util.tolocal(self.dirstate.branch()))
607 self.invalidate()
606 self.invalidate()
608 self.dirstate.invalidate()
607 self.dirstate.invalidate()
609 else:
608 else:
610 self.ui.warn(_("no rollback information available\n"))
609 self.ui.warn(_("no rollback information available\n"))
611 finally:
610 finally:
612 del lock, wlock
611 del lock, wlock
613
612
614 def invalidate(self):
613 def invalidate(self):
615 for a in "changelog manifest".split():
614 for a in "changelog manifest".split():
616 if hasattr(self, a):
615 if hasattr(self, a):
617 self.__delattr__(a)
616 self.__delattr__(a)
618 self.tagscache = None
617 self.tagscache = None
619 self._tagstypecache = None
618 self._tagstypecache = None
620 self.nodetagscache = None
619 self.nodetagscache = None
621 self.branchcache = None
620 self.branchcache = None
622 self._ubranchcache = None
621 self._ubranchcache = None
623
622
624 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
623 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
625 try:
624 try:
626 l = lock.lock(lockname, 0, releasefn, desc=desc)
625 l = lock.lock(lockname, 0, releasefn, desc=desc)
627 except lock.LockHeld, inst:
626 except lock.LockHeld, inst:
628 if not wait:
627 if not wait:
629 raise
628 raise
630 self.ui.warn(_("waiting for lock on %s held by %r\n") %
629 self.ui.warn(_("waiting for lock on %s held by %r\n") %
631 (desc, inst.locker))
630 (desc, inst.locker))
632 # default to 600 seconds timeout
631 # default to 600 seconds timeout
633 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
632 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
634 releasefn, desc=desc)
633 releasefn, desc=desc)
635 if acquirefn:
634 if acquirefn:
636 acquirefn()
635 acquirefn()
637 return l
636 return l
638
637
639 def lock(self, wait=True):
638 def lock(self, wait=True):
640 if self._lockref and self._lockref():
639 if self._lockref and self._lockref():
641 return self._lockref()
640 return self._lockref()
642
641
643 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
642 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
644 _('repository %s') % self.origroot)
643 _('repository %s') % self.origroot)
645 self._lockref = weakref.ref(l)
644 self._lockref = weakref.ref(l)
646 return l
645 return l
647
646
648 def wlock(self, wait=True):
647 def wlock(self, wait=True):
649 if self._wlockref and self._wlockref():
648 if self._wlockref and self._wlockref():
650 return self._wlockref()
649 return self._wlockref()
651
650
652 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
651 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
653 self.dirstate.invalidate, _('working directory of %s') %
652 self.dirstate.invalidate, _('working directory of %s') %
654 self.origroot)
653 self.origroot)
655 self._wlockref = weakref.ref(l)
654 self._wlockref = weakref.ref(l)
656 return l
655 return l
657
656
658 def filecommit(self, fn, manifest1, manifest2, linkrev, tr, changelist):
657 def filecommit(self, fn, manifest1, manifest2, linkrev, tr, changelist):
659 """
658 """
660 commit an individual file as part of a larger transaction
659 commit an individual file as part of a larger transaction
661 """
660 """
662
661
663 t = self.wread(fn)
662 t = self.wread(fn)
664 fl = self.file(fn)
663 fl = self.file(fn)
665 fp1 = manifest1.get(fn, nullid)
664 fp1 = manifest1.get(fn, nullid)
666 fp2 = manifest2.get(fn, nullid)
665 fp2 = manifest2.get(fn, nullid)
667
666
668 meta = {}
667 meta = {}
669 cp = self.dirstate.copied(fn)
668 cp = self.dirstate.copied(fn)
670 if cp:
669 if cp:
671 # Mark the new revision of this file as a copy of another
670 # Mark the new revision of this file as a copy of another
672 # file. This copy data will effectively act as a parent
671 # file. This copy data will effectively act as a parent
673 # of this new revision. If this is a merge, the first
672 # of this new revision. If this is a merge, the first
674 # parent will be the nullid (meaning "look up the copy data")
673 # parent will be the nullid (meaning "look up the copy data")
675 # and the second one will be the other parent. For example:
674 # and the second one will be the other parent. For example:
676 #
675 #
677 # 0 --- 1 --- 3 rev1 changes file foo
676 # 0 --- 1 --- 3 rev1 changes file foo
678 # \ / rev2 renames foo to bar and changes it
677 # \ / rev2 renames foo to bar and changes it
679 # \- 2 -/ rev3 should have bar with all changes and
678 # \- 2 -/ rev3 should have bar with all changes and
680 # should record that bar descends from
679 # should record that bar descends from
681 # bar in rev2 and foo in rev1
680 # bar in rev2 and foo in rev1
682 #
681 #
683 # this allows this merge to succeed:
682 # this allows this merge to succeed:
684 #
683 #
685 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
684 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
686 # \ / merging rev3 and rev4 should use bar@rev2
685 # \ / merging rev3 and rev4 should use bar@rev2
687 # \- 2 --- 4 as the merge base
686 # \- 2 --- 4 as the merge base
688 #
687 #
689 meta["copy"] = cp
688 meta["copy"] = cp
690 if not manifest2: # not a branch merge
689 if not manifest2: # not a branch merge
691 meta["copyrev"] = hex(manifest1.get(cp, nullid))
690 meta["copyrev"] = hex(manifest1.get(cp, nullid))
692 fp2 = nullid
691 fp2 = nullid
693 elif fp2 != nullid: # copied on remote side
692 elif fp2 != nullid: # copied on remote side
694 meta["copyrev"] = hex(manifest1.get(cp, nullid))
693 meta["copyrev"] = hex(manifest1.get(cp, nullid))
695 elif fp1 != nullid: # copied on local side, reversed
694 elif fp1 != nullid: # copied on local side, reversed
696 meta["copyrev"] = hex(manifest2.get(cp))
695 meta["copyrev"] = hex(manifest2.get(cp))
697 fp2 = fp1
696 fp2 = fp1
698 elif cp in manifest2: # directory rename on local side
697 elif cp in manifest2: # directory rename on local side
699 meta["copyrev"] = hex(manifest2[cp])
698 meta["copyrev"] = hex(manifest2[cp])
700 else: # directory rename on remote side
699 else: # directory rename on remote side
701 meta["copyrev"] = hex(manifest1.get(cp, nullid))
700 meta["copyrev"] = hex(manifest1.get(cp, nullid))
702 self.ui.debug(_(" %s: copy %s:%s\n") %
701 self.ui.debug(_(" %s: copy %s:%s\n") %
703 (fn, cp, meta["copyrev"]))
702 (fn, cp, meta["copyrev"]))
704 fp1 = nullid
703 fp1 = nullid
705 elif fp2 != nullid:
704 elif fp2 != nullid:
706 # is one parent an ancestor of the other?
705 # is one parent an ancestor of the other?
707 fpa = fl.ancestor(fp1, fp2)
706 fpa = fl.ancestor(fp1, fp2)
708 if fpa == fp1:
707 if fpa == fp1:
709 fp1, fp2 = fp2, nullid
708 fp1, fp2 = fp2, nullid
710 elif fpa == fp2:
709 elif fpa == fp2:
711 fp2 = nullid
710 fp2 = nullid
712
711
713 # is the file unmodified from the parent? report existing entry
712 # is the file unmodified from the parent? report existing entry
714 if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
713 if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
715 return fp1
714 return fp1
716
715
717 changelist.append(fn)
716 changelist.append(fn)
718 return fl.add(t, meta, tr, linkrev, fp1, fp2)
717 return fl.add(t, meta, tr, linkrev, fp1, fp2)
719
718
720 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
719 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
721 if p1 is None:
720 if p1 is None:
722 p1, p2 = self.dirstate.parents()
721 p1, p2 = self.dirstate.parents()
723 return self.commit(files=files, text=text, user=user, date=date,
722 return self.commit(files=files, text=text, user=user, date=date,
724 p1=p1, p2=p2, extra=extra, empty_ok=True)
723 p1=p1, p2=p2, extra=extra, empty_ok=True)
725
724
726 def commit(self, files=None, text="", user=None, date=None,
725 def commit(self, files=None, text="", user=None, date=None,
727 match=util.always, force=False, force_editor=False,
726 match=util.always, force=False, force_editor=False,
728 p1=None, p2=None, extra={}, empty_ok=False):
727 p1=None, p2=None, extra={}, empty_ok=False):
729 wlock = lock = tr = None
728 wlock = lock = tr = None
730 valid = 0 # don't save the dirstate if this isn't set
729 valid = 0 # don't save the dirstate if this isn't set
731 if files:
730 if files:
732 files = util.unique(files)
731 files = util.unique(files)
733 try:
732 try:
734 commit = []
733 commit = []
735 remove = []
734 remove = []
736 changed = []
735 changed = []
737 use_dirstate = (p1 is None) # not rawcommit
736 use_dirstate = (p1 is None) # not rawcommit
738 extra = extra.copy()
737 extra = extra.copy()
739
738
740 if use_dirstate:
739 if use_dirstate:
741 if files:
740 if files:
742 for f in files:
741 for f in files:
743 s = self.dirstate[f]
742 s = self.dirstate[f]
744 if s in 'nma':
743 if s in 'nma':
745 commit.append(f)
744 commit.append(f)
746 elif s == 'r':
745 elif s == 'r':
747 remove.append(f)
746 remove.append(f)
748 else:
747 else:
749 self.ui.warn(_("%s not tracked!\n") % f)
748 self.ui.warn(_("%s not tracked!\n") % f)
750 else:
749 else:
751 changes = self.status(match=match)[:5]
750 changes = self.status(match=match)[:5]
752 modified, added, removed, deleted, unknown = changes
751 modified, added, removed, deleted, unknown = changes
753 commit = modified + added
752 commit = modified + added
754 remove = removed
753 remove = removed
755 else:
754 else:
756 commit = files
755 commit = files
757
756
758 if use_dirstate:
757 if use_dirstate:
759 p1, p2 = self.dirstate.parents()
758 p1, p2 = self.dirstate.parents()
760 update_dirstate = True
759 update_dirstate = True
761 else:
760 else:
762 p1, p2 = p1, p2 or nullid
761 p1, p2 = p1, p2 or nullid
763 update_dirstate = (self.dirstate.parents()[0] == p1)
762 update_dirstate = (self.dirstate.parents()[0] == p1)
764
763
765 c1 = self.changelog.read(p1)
764 c1 = self.changelog.read(p1)
766 c2 = self.changelog.read(p2)
765 c2 = self.changelog.read(p2)
767 m1 = self.manifest.read(c1[0]).copy()
766 m1 = self.manifest.read(c1[0]).copy()
768 m2 = self.manifest.read(c2[0])
767 m2 = self.manifest.read(c2[0])
769
768
770 if use_dirstate:
769 if use_dirstate:
771 branchname = self.workingctx().branch()
770 branchname = self.workingctx().branch()
772 try:
771 try:
773 branchname = branchname.decode('UTF-8').encode('UTF-8')
772 branchname = branchname.decode('UTF-8').encode('UTF-8')
774 except UnicodeDecodeError:
773 except UnicodeDecodeError:
775 raise util.Abort(_('branch name not in UTF-8!'))
774 raise util.Abort(_('branch name not in UTF-8!'))
776 else:
775 else:
777 branchname = ""
776 branchname = ""
778
777
779 if use_dirstate:
778 if use_dirstate:
780 oldname = c1[5].get("branch") # stored in UTF-8
779 oldname = c1[5].get("branch") # stored in UTF-8
781 if (not commit and not remove and not force and p2 == nullid
780 if (not commit and not remove and not force and p2 == nullid
782 and branchname == oldname):
781 and branchname == oldname):
783 self.ui.status(_("nothing changed\n"))
782 self.ui.status(_("nothing changed\n"))
784 return None
783 return None
785
784
786 xp1 = hex(p1)
785 xp1 = hex(p1)
787 if p2 == nullid: xp2 = ''
786 if p2 == nullid: xp2 = ''
788 else: xp2 = hex(p2)
787 else: xp2 = hex(p2)
789
788
790 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
789 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
791
790
792 wlock = self.wlock()
791 wlock = self.wlock()
793 lock = self.lock()
792 lock = self.lock()
794 tr = self.transaction()
793 tr = self.transaction()
795 trp = weakref.proxy(tr)
794 trp = weakref.proxy(tr)
796
795
797 # check in files
796 # check in files
798 new = {}
797 new = {}
799 linkrev = self.changelog.count()
798 linkrev = self.changelog.count()
800 commit.sort()
799 commit.sort()
801 is_exec = util.execfunc(self.root, m1.execf)
800 is_exec = util.execfunc(self.root, m1.execf)
802 is_link = util.linkfunc(self.root, m1.linkf)
801 is_link = util.linkfunc(self.root, m1.linkf)
803 for f in commit:
802 for f in commit:
804 self.ui.note(f + "\n")
803 self.ui.note(f + "\n")
805 try:
804 try:
806 new[f] = self.filecommit(f, m1, m2, linkrev, trp, changed)
805 new[f] = self.filecommit(f, m1, m2, linkrev, trp, changed)
807 new_exec = is_exec(f)
806 new_exec = is_exec(f)
808 new_link = is_link(f)
807 new_link = is_link(f)
809 if ((not changed or changed[-1] != f) and
808 if ((not changed or changed[-1] != f) and
810 m2.get(f) != new[f]):
809 m2.get(f) != new[f]):
811 # mention the file in the changelog if some
810 # mention the file in the changelog if some
812 # flag changed, even if there was no content
811 # flag changed, even if there was no content
813 # change.
812 # change.
814 old_exec = m1.execf(f)
813 old_exec = m1.execf(f)
815 old_link = m1.linkf(f)
814 old_link = m1.linkf(f)
816 if old_exec != new_exec or old_link != new_link:
815 if old_exec != new_exec or old_link != new_link:
817 changed.append(f)
816 changed.append(f)
818 m1.set(f, new_exec, new_link)
817 m1.set(f, new_exec, new_link)
819 if use_dirstate:
818 if use_dirstate:
820 self.dirstate.normal(f)
819 self.dirstate.normal(f)
821
820
822 except (OSError, IOError):
821 except (OSError, IOError):
823 if use_dirstate:
822 if use_dirstate:
824 self.ui.warn(_("trouble committing %s!\n") % f)
823 self.ui.warn(_("trouble committing %s!\n") % f)
825 raise
824 raise
826 else:
825 else:
827 remove.append(f)
826 remove.append(f)
828
827
829 # update manifest
828 # update manifest
830 m1.update(new)
829 m1.update(new)
831 remove.sort()
830 remove.sort()
832 removed = []
831 removed = []
833
832
834 for f in remove:
833 for f in remove:
835 if f in m1:
834 if f in m1:
836 del m1[f]
835 del m1[f]
837 removed.append(f)
836 removed.append(f)
838 elif f in m2:
837 elif f in m2:
839 removed.append(f)
838 removed.append(f)
840 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
839 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
841 (new, removed))
840 (new, removed))
842
841
843 # add changeset
842 # add changeset
844 new = new.keys()
843 new = new.keys()
845 new.sort()
844 new.sort()
846
845
847 user = user or self.ui.username()
846 user = user or self.ui.username()
848 if (not empty_ok and not text) or force_editor:
847 if (not empty_ok and not text) or force_editor:
849 edittext = []
848 edittext = []
850 if text:
849 if text:
851 edittext.append(text)
850 edittext.append(text)
852 edittext.append("")
851 edittext.append("")
853 edittext.append(_("HG: Enter commit message."
852 edittext.append(_("HG: Enter commit message."
854 " Lines beginning with 'HG:' are removed."))
853 " Lines beginning with 'HG:' are removed."))
855 edittext.append("HG: --")
854 edittext.append("HG: --")
856 edittext.append("HG: user: %s" % user)
855 edittext.append("HG: user: %s" % user)
857 if p2 != nullid:
856 if p2 != nullid:
858 edittext.append("HG: branch merge")
857 edittext.append("HG: branch merge")
859 if branchname:
858 if branchname:
860 edittext.append("HG: branch '%s'" % util.tolocal(branchname))
859 edittext.append("HG: branch '%s'" % util.tolocal(branchname))
861 edittext.extend(["HG: changed %s" % f for f in changed])
860 edittext.extend(["HG: changed %s" % f for f in changed])
862 edittext.extend(["HG: removed %s" % f for f in removed])
861 edittext.extend(["HG: removed %s" % f for f in removed])
863 if not changed and not remove:
862 if not changed and not remove:
864 edittext.append("HG: no files changed")
863 edittext.append("HG: no files changed")
865 edittext.append("")
864 edittext.append("")
866 # run editor in the repository root
865 # run editor in the repository root
867 olddir = os.getcwd()
866 olddir = os.getcwd()
868 os.chdir(self.root)
867 os.chdir(self.root)
869 text = self.ui.edit("\n".join(edittext), user)
868 text = self.ui.edit("\n".join(edittext), user)
870 os.chdir(olddir)
869 os.chdir(olddir)
871
870
872 if branchname:
871 if branchname:
873 extra["branch"] = branchname
872 extra["branch"] = branchname
874
873
875 if use_dirstate:
874 if use_dirstate:
876 lines = [line.rstrip() for line in text.rstrip().splitlines()]
875 lines = [line.rstrip() for line in text.rstrip().splitlines()]
877 while lines and not lines[0]:
876 while lines and not lines[0]:
878 del lines[0]
877 del lines[0]
879 if not lines:
878 if not lines:
880 raise util.Abort(_("empty commit message"))
879 raise util.Abort(_("empty commit message"))
881 text = '\n'.join(lines)
880 text = '\n'.join(lines)
882
881
883 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
882 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
884 user, date, extra)
883 user, date, extra)
885 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
884 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
886 parent2=xp2)
885 parent2=xp2)
887 tr.close()
886 tr.close()
888
887
889 if self.branchcache and "branch" in extra:
888 if self.branchcache and "branch" in extra:
890 self.branchcache[util.tolocal(extra["branch"])] = n
889 self.branchcache[util.tolocal(extra["branch"])] = n
891
890
892 if use_dirstate or update_dirstate:
891 if use_dirstate or update_dirstate:
893 self.dirstate.setparents(n)
892 self.dirstate.setparents(n)
894 if use_dirstate:
893 if use_dirstate:
895 for f in removed:
894 for f in removed:
896 self.dirstate.forget(f)
895 self.dirstate.forget(f)
897 valid = 1 # our dirstate updates are complete
896 valid = 1 # our dirstate updates are complete
898
897
899 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
898 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
900 return n
899 return n
901 finally:
900 finally:
902 if not valid: # don't save our updated dirstate
901 if not valid: # don't save our updated dirstate
903 self.dirstate.invalidate()
902 self.dirstate.invalidate()
904 del tr, lock, wlock
903 del tr, lock, wlock
905
904
def walk(self, node=None, files=[], match=util.always, badmatch=None):
    '''
    walk recursively through the directory tree or a given
    changeset, finding all files matched by the match
    function

    results are yielded in a tuple (src, filename), where src
    is one of:
    'f' the file was found in the directory tree
    'm' the file was only in the dirstate and not in the tree
    'b' file was not found and matched badmatch
    '''

    if node:
        # walking a committed revision: match against its manifest
        fdict = dict.fromkeys(files)
        # for dirstate.walk, files=['.'] means "walk the whole tree".
        # follow that here, too
        fdict.pop('.', None)
        mdict = self.manifest.read(self.changelog.read(node)[0])
        mfiles = mdict.keys()
        mfiles.sort()
        for fn in mfiles:
            for ffn in fdict:
                # match if the file is the exact name or a directory
                if ffn == fn or fn.startswith("%s/" % ffn):
                    del fdict[ffn]
                    break
            if match(fn):
                yield 'm', fn
        # anything left in fdict was named explicitly but absent from
        # the manifest
        ffiles = fdict.keys()
        ffiles.sort()
        for fn in ffiles:
            if badmatch and badmatch(fn):
                if match(fn):
                    yield 'b', fn
            else:
                self.ui.warn(_('%s: No such file in rev %s\n')
                             % (self.pathto(fn), short(node)))
    else:
        # no node given: delegate to the dirstate walker
        for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
            yield src, fn
947
946
def status(self, node1=None, node2=None, files=[], match=util.always,
           list_ignored=False, list_clean=False):
    """return status of files between two nodes or node and working directory

    If node1 is None, use the first dirstate parent instead.
    If node2 is None, compare node1 with working directory.

    Returns a 7-tuple of sorted lists:
    (modified, added, removed, deleted, unknown, ignored, clean)
    """

    def fcmp(fn, getnode):
        # full content comparison against the filelog revision
        t1 = self.wread(fn)
        return self.file(fn).cmp(getnode(fn), t1)

    def mfmatches(node):
        # manifest of `node`, restricted to files accepted by `match`
        change = self.changelog.read(node)
        mf = self.manifest.read(change[0]).copy()
        for fn in mf.keys():
            if not match(fn):
                del mf[fn]
        return mf

    modified, added, removed, deleted, unknown = [], [], [], [], []
    ignored, clean = [], []

    compareworking = False
    if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
        compareworking = True

    if not compareworking:
        # read the manifest from node1 before the manifest from node2,
        # so that we'll hit the manifest cache if we're going through
        # all the revisions in parent->child order.
        mf1 = mfmatches(node1)

    # are we comparing the working directory?
    if not node2:
        (lookup, modified, added, removed, deleted, unknown,
         ignored, clean) = self.dirstate.status(files, match,
                                                list_ignored, list_clean)

        # are we comparing working dir against its parent?
        if compareworking:
            if lookup:
                fixup = []
                # do a full compare of any files that might have changed
                ctx = self.changectx()
                for f in lookup:
                    if f not in ctx or ctx[f].cmp(self.wread(f)):
                        modified.append(f)
                    else:
                        fixup.append(f)
                        if list_clean:
                            clean.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    wlock = None
                    try:
                        try:
                            # best-effort: skip the fixup if the wlock
                            # cannot be taken without blocking
                            wlock = self.wlock(False)
                        except lock.LockException:
                            pass
                        if wlock:
                            for f in fixup:
                                self.dirstate.normal(f)
                    finally:
                        del wlock
        else:
            # we are comparing working dir against non-parent
            # generate a pseudo-manifest for the working dir
            # XXX: create it in dirstate.py ?
            mf2 = mfmatches(self.dirstate.parents()[0])
            is_exec = util.execfunc(self.root, mf2.execf)
            is_link = util.linkfunc(self.root, mf2.linkf)
            for f in lookup + modified + added:
                mf2[f] = ""
                mf2.set(f, is_exec(f), is_link(f))
            for f in removed:
                if f in mf2:
                    del mf2[f]

    else:
        # we are comparing two revisions
        mf2 = mfmatches(node2)

    if not compareworking:
        # flush lists from dirstate before comparing manifests
        modified, added, clean = [], [], []

        # make sure to sort the files so we talk to the disk in a
        # reasonable order
        mf2keys = mf2.keys()
        mf2keys.sort()
        getnode = lambda fn: mf1.get(fn, nullid)
        for fn in mf2keys:
            if fn in mf1:
                if (mf1.flags(fn) != mf2.flags(fn) or
                    (mf1[fn] != mf2[fn] and
                     (mf2[fn] != "" or fcmp(fn, getnode)))):
                    modified.append(fn)
                elif list_clean:
                    clean.append(fn)
                # consumed: whatever is left in mf1 at the end was removed
                del mf1[fn]
            else:
                added.append(fn)

        removed = mf1.keys()

    # sort and return results:
    for l in modified, added, removed, deleted, unknown, ignored, clean:
        l.sort()
    return (modified, added, removed, deleted, unknown, ignored, clean)
1059
1058
def add(self, list):
    """Schedule the given files for addition to the dirstate.

    Returns the list of file names that were rejected (missing from
    disk, or not a regular file/symlink).  Files already tracked are
    warned about but not rejected.
    """
    wlock = self.wlock()
    try:
        rejected = []
        for f in list:
            p = self.wjoin(f)
            try:
                st = os.lstat(p)
            except OSError:
                # was a bare `except:`, which also swallowed
                # KeyboardInterrupt and programming errors; os.lstat
                # only raises OSError for a missing/unreachable path
                self.ui.warn(_("%s does not exist!\n") % f)
                rejected.append(f)
                continue
            if st.st_size > 10000000:
                # soft warning only; the file is still added
                self.ui.warn(_("%s: files over 10MB may cause memory and"
                               " performance problems\n"
                               "(use 'hg revert %s' to unadd the file)\n")
                             % (f, f))
            if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                self.ui.warn(_("%s not added: only files and symlinks "
                               "supported currently\n") % f)
                # report the repo-relative name, consistent with the
                # "does not exist" branch above (previously appended the
                # absolute path p)
                rejected.append(f)
            elif self.dirstate[f] in 'amn':
                self.ui.warn(_("%s already tracked!\n") % f)
            elif self.dirstate[f] == 'r':
                # previously removed: resurrect instead of re-adding
                self.dirstate.normallookup(f)
            else:
                self.dirstate.add(f)
        return rejected
    finally:
        del wlock
1090
1089
def forget(self, list):
    """Undo a pending add: drop each file in `list` from the dirstate.

    Files whose dirstate entry is not 'a' (added) are left alone and a
    warning is printed for each.
    """
    wlock = self.wlock()
    try:
        for fname in list:
            if self.dirstate[fname] == 'a':
                self.dirstate.forget(fname)
            else:
                self.ui.warn(_("%s not added!\n") % fname)
    finally:
        del wlock
1101
1100
def remove(self, list, unlink=False):
    """Schedule the named files for removal from the dirstate.

    With unlink=True the working copies are deleted from disk first;
    a file that is already gone is not an error.
    """
    wlock = None
    try:
        if unlink:
            # best-effort deletion of the working copies
            for fname in list:
                try:
                    util.unlink(self.wjoin(fname))
                except OSError as inst:
                    if inst.errno != errno.ENOENT:
                        raise
        wlock = self.wlock()
        for fname in list:
            if unlink and os.path.exists(self.wjoin(fname)):
                # deletion above failed (or the file reappeared)
                self.ui.warn(_("%s still exists!\n") % fname)
            elif self.dirstate[fname] == 'a':
                # never committed: removing an add is just a forget
                self.dirstate.forget(fname)
            elif fname not in self.dirstate:
                self.ui.warn(_("%s not tracked!\n") % fname)
            else:
                self.dirstate.remove(fname)
    finally:
        del wlock
1124
1123
def undelete(self, list):
    """Restore files scheduled for removal ('r' in the dirstate).

    Each file's content is rewritten into the working directory from
    the parent manifest that contains it, and its dirstate entry is
    set back to normal.
    """
    wlock = None
    try:
        manifests = [self.manifest.read(self.changelog.read(p)[0])
                     for p in self.dirstate.parents() if p != nullid]
        wlock = self.wlock()
        for fname in list:
            if self.dirstate[fname] != 'r':
                self.ui.warn("%s not removed!\n" % fname)
            else:
                # pick whichever parent manifest still has the file
                if fname in manifests[0]:
                    m = manifests[0]
                else:
                    m = manifests[1]
                data = self.file(fname).read(m[fname])
                self.wwrite(fname, data, m.flags(fname))
                self.dirstate.normal(fname)
    finally:
        del wlock
1141
1140
def copy(self, source, dest):
    """Record in the dirstate that `dest` is a copy of `source`.

    `dest` must already exist in the working directory as a regular
    file or symlink; otherwise a warning is printed and nothing is
    recorded.
    """
    wlock = None
    try:
        target = self.wjoin(dest)
        if not (os.path.exists(target) or os.path.islink(target)):
            self.ui.warn(_("%s does not exist!\n") % dest)
        elif not (os.path.isfile(target) or os.path.islink(target)):
            self.ui.warn(_("copy failed: %s is not a file or a "
                           "symbolic link\n") % dest)
        else:
            wlock = self.wlock()
            if dest not in self.dirstate:
                self.dirstate.add(dest)
            self.dirstate.copy(source, dest)
    finally:
        del wlock
1158
1157
def heads(self, start=None):
    """Return the changelog heads, newest revision first.

    Revisions are unique per node, so sorting by revision number in
    reverse gives exactly the same order as the old negate-and-sort
    tuple trick.
    """
    cl = self.changelog
    return sorted(cl.heads(start), key=cl.rev, reverse=True)
1165
1164
def branchheads(self, branch, start=None):
    """Return the head nodes of `branch`, optionally limited to those
    reachable from `start`.

    Algorithm: start at the branch tip (a guaranteed head, since no
    later revision can be in the branch) and walk every revision
    backwards.  Mercurial guarantees reverse topological order, so:
    - a revision already in the ancestor set is not a head; replace it
      in the set with its parents;
    - otherwise, a revision on this branch is a new head (it was not an
      ancestor of any head found so far); record it and add its parents
      to the ancestor set;
    - revisions on other branches are ignored.
    Finally, nodesbetween filters out heads unreachable from `start`.
    There may be a more efficient way to fold that filtering into the
    walk itself.
    """
    branches = self.branchtags()
    if branch not in branches:
        return []
    makeset = util.set
    heads = [self.changelog.rev(branches[branch])]
    # it does not matter whether ancestors contains nullrev
    ancestors = makeset(self.changelog.parentrevs(heads[0]))
    for rev in xrange(heads[0] - 1, nullrev, -1):
        if rev in ancestors:
            ancestors.update(self.changelog.parentrevs(rev))
            ancestors.remove(rev)
        elif self.changectx(rev).branch() == branch:
            heads.append(rev)
            ancestors.update(self.changelog.parentrevs(rev))
    heads = [self.changelog.node(rev) for rev in heads]
    if start is not None:
        heads = self.changelog.nodesbetween([start], heads)[2]
    return heads
1211
1210
def branches(self, nodes):
    """For each node, return a 4-tuple describing its linear segment of
    history: (segment tip, segment root, root's first parent, root's
    second parent).  A segment ends at a merge or at a root changeset.

    With no nodes given, the changelog tip is used.
    """
    if not nodes:
        nodes = [self.changelog.tip()]
    result = []
    for seg_tip in nodes:
        n = seg_tip
        while True:
            p1, p2 = self.changelog.parents(n)
            if p2 != nullid or p1 == nullid:
                # merge or root: this node ends the linear segment
                result.append((seg_tip, n, p1, p2))
                break
            n = p1
    return result
1225
1224
def between(self, pairs):
    """For each (top, bottom) pair, walk the first-parent chain from
    top down to bottom and collect the nodes found at exponentially
    growing distances from top (1, 2, 4, 8, ...).

    Returns a list with one such sample list per input pair; bottom
    itself is never included.
    """
    results = []
    for top, bottom in pairs:
        sample = []
        node = top
        dist, nextdist = 0, 1
        while node != bottom:
            if dist == nextdist:
                sample.append(node)
                nextdist *= 2
            node = self.changelog.parents(node)[0]
            dist += 1
        results.append(sample)
    return results
1244
1243
1245 def findincoming(self, remote, base=None, heads=None, force=False):
1244 def findincoming(self, remote, base=None, heads=None, force=False):
1246 """Return list of roots of the subsets of missing nodes from remote
1245 """Return list of roots of the subsets of missing nodes from remote
1247
1246
1248 If base dict is specified, assume that these nodes and their parents
1247 If base dict is specified, assume that these nodes and their parents
1249 exist on the remote side and that no child of a node of base exists
1248 exist on the remote side and that no child of a node of base exists
1250 in both remote and self.
1249 in both remote and self.
1251 Furthermore base will be updated to include the nodes that exists
1250 Furthermore base will be updated to include the nodes that exists
1252 in self and remote but no children exists in self and remote.
1251 in self and remote but no children exists in self and remote.
1253 If a list of heads is specified, return only nodes which are heads
1252 If a list of heads is specified, return only nodes which are heads
1254 or ancestors of these heads.
1253 or ancestors of these heads.
1255
1254
1256 All the ancestors of base are in self and in remote.
1255 All the ancestors of base are in self and in remote.
1257 All the descendants of the list returned are missing in self.
1256 All the descendants of the list returned are missing in self.
1258 (and so we know that the rest of the nodes are missing in remote, see
1257 (and so we know that the rest of the nodes are missing in remote, see
1259 outgoing)
1258 outgoing)
1260 """
1259 """
1261 m = self.changelog.nodemap
1260 m = self.changelog.nodemap
1262 search = []
1261 search = []
1263 fetch = {}
1262 fetch = {}
1264 seen = {}
1263 seen = {}
1265 seenbranch = {}
1264 seenbranch = {}
1266 if base == None:
1265 if base == None:
1267 base = {}
1266 base = {}
1268
1267
1269 if not heads:
1268 if not heads:
1270 heads = remote.heads()
1269 heads = remote.heads()
1271
1270
1272 if self.changelog.tip() == nullid:
1271 if self.changelog.tip() == nullid:
1273 base[nullid] = 1
1272 base[nullid] = 1
1274 if heads != [nullid]:
1273 if heads != [nullid]:
1275 return [nullid]
1274 return [nullid]
1276 return []
1275 return []
1277
1276
1278 # assume we're closer to the tip than the root
1277 # assume we're closer to the tip than the root
1279 # and start by examining the heads
1278 # and start by examining the heads
1280 self.ui.status(_("searching for changes\n"))
1279 self.ui.status(_("searching for changes\n"))
1281
1280
1282 unknown = []
1281 unknown = []
1283 for h in heads:
1282 for h in heads:
1284 if h not in m:
1283 if h not in m:
1285 unknown.append(h)
1284 unknown.append(h)
1286 else:
1285 else:
1287 base[h] = 1
1286 base[h] = 1
1288
1287
1289 if not unknown:
1288 if not unknown:
1290 return []
1289 return []
1291
1290
1292 req = dict.fromkeys(unknown)
1291 req = dict.fromkeys(unknown)
1293 reqcnt = 0
1292 reqcnt = 0
1294
1293
1295 # search through remote branches
1294 # search through remote branches
1296 # a 'branch' here is a linear segment of history, with four parts:
1295 # a 'branch' here is a linear segment of history, with four parts:
1297 # head, root, first parent, second parent
1296 # head, root, first parent, second parent
1298 # (a branch always has two parents (or none) by definition)
1297 # (a branch always has two parents (or none) by definition)
1299 unknown = remote.branches(unknown)
1298 unknown = remote.branches(unknown)
1300 while unknown:
1299 while unknown:
1301 r = []
1300 r = []
1302 while unknown:
1301 while unknown:
1303 n = unknown.pop(0)
1302 n = unknown.pop(0)
1304 if n[0] in seen:
1303 if n[0] in seen:
1305 continue
1304 continue
1306
1305
1307 self.ui.debug(_("examining %s:%s\n")
1306 self.ui.debug(_("examining %s:%s\n")
1308 % (short(n[0]), short(n[1])))
1307 % (short(n[0]), short(n[1])))
1309 if n[0] == nullid: # found the end of the branch
1308 if n[0] == nullid: # found the end of the branch
1310 pass
1309 pass
1311 elif n in seenbranch:
1310 elif n in seenbranch:
1312 self.ui.debug(_("branch already found\n"))
1311 self.ui.debug(_("branch already found\n"))
1313 continue
1312 continue
1314 elif n[1] and n[1] in m: # do we know the base?
1313 elif n[1] and n[1] in m: # do we know the base?
1315 self.ui.debug(_("found incomplete branch %s:%s\n")
1314 self.ui.debug(_("found incomplete branch %s:%s\n")
1316 % (short(n[0]), short(n[1])))
1315 % (short(n[0]), short(n[1])))
1317 search.append(n) # schedule branch range for scanning
1316 search.append(n) # schedule branch range for scanning
1318 seenbranch[n] = 1
1317 seenbranch[n] = 1
1319 else:
1318 else:
1320 if n[1] not in seen and n[1] not in fetch:
1319 if n[1] not in seen and n[1] not in fetch:
1321 if n[2] in m and n[3] in m:
1320 if n[2] in m and n[3] in m:
1322 self.ui.debug(_("found new changeset %s\n") %
1321 self.ui.debug(_("found new changeset %s\n") %
1323 short(n[1]))
1322 short(n[1]))
1324 fetch[n[1]] = 1 # earliest unknown
1323 fetch[n[1]] = 1 # earliest unknown
1325 for p in n[2:4]:
1324 for p in n[2:4]:
1326 if p in m:
1325 if p in m:
1327 base[p] = 1 # latest known
1326 base[p] = 1 # latest known
1328
1327
1329 for p in n[2:4]:
1328 for p in n[2:4]:
1330 if p not in req and p not in m:
1329 if p not in req and p not in m:
1331 r.append(p)
1330 r.append(p)
1332 req[p] = 1
1331 req[p] = 1
1333 seen[n[0]] = 1
1332 seen[n[0]] = 1
1334
1333
1335 if r:
1334 if r:
1336 reqcnt += 1
1335 reqcnt += 1
1337 self.ui.debug(_("request %d: %s\n") %
1336 self.ui.debug(_("request %d: %s\n") %
1338 (reqcnt, " ".join(map(short, r))))
1337 (reqcnt, " ".join(map(short, r))))
1339 for p in xrange(0, len(r), 10):
1338 for p in xrange(0, len(r), 10):
1340 for b in remote.branches(r[p:p+10]):
1339 for b in remote.branches(r[p:p+10]):
1341 self.ui.debug(_("received %s:%s\n") %
1340 self.ui.debug(_("received %s:%s\n") %
1342 (short(b[0]), short(b[1])))
1341 (short(b[0]), short(b[1])))
1343 unknown.append(b)
1342 unknown.append(b)
1344
1343
1345 # do binary search on the branches we found
1344 # do binary search on the branches we found
1346 while search:
1345 while search:
1347 n = search.pop(0)
1346 n = search.pop(0)
1348 reqcnt += 1
1347 reqcnt += 1
1349 l = remote.between([(n[0], n[1])])[0]
1348 l = remote.between([(n[0], n[1])])[0]
1350 l.append(n[1])
1349 l.append(n[1])
1351 p = n[0]
1350 p = n[0]
1352 f = 1
1351 f = 1
1353 for i in l:
1352 for i in l:
1354 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1353 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1355 if i in m:
1354 if i in m:
1356 if f <= 2:
1355 if f <= 2:
1357 self.ui.debug(_("found new branch changeset %s\n") %
1356 self.ui.debug(_("found new branch changeset %s\n") %
1358 short(p))
1357 short(p))
1359 fetch[p] = 1
1358 fetch[p] = 1
1360 base[i] = 1
1359 base[i] = 1
1361 else:
1360 else:
1362 self.ui.debug(_("narrowed branch search to %s:%s\n")
1361 self.ui.debug(_("narrowed branch search to %s:%s\n")
1363 % (short(p), short(i)))
1362 % (short(p), short(i)))
1364 search.append((p, i))
1363 search.append((p, i))
1365 break
1364 break
1366 p, f = i, f * 2
1365 p, f = i, f * 2
1367
1366
1368 # sanity check our fetch list
1367 # sanity check our fetch list
1369 for f in fetch.keys():
1368 for f in fetch.keys():
1370 if f in m:
1369 if f in m:
1371 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1370 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1372
1371
1373 if base.keys() == [nullid]:
1372 if base.keys() == [nullid]:
1374 if force:
1373 if force:
1375 self.ui.warn(_("warning: repository is unrelated\n"))
1374 self.ui.warn(_("warning: repository is unrelated\n"))
1376 else:
1375 else:
1377 raise util.Abort(_("repository is unrelated"))
1376 raise util.Abort(_("repository is unrelated"))
1378
1377
1379 self.ui.debug(_("found new changesets starting at ") +
1378 self.ui.debug(_("found new changesets starting at ") +
1380 " ".join([short(f) for f in fetch]) + "\n")
1379 " ".join([short(f) for f in fetch]) + "\n")
1381
1380
1382 self.ui.debug(_("%d total queries\n") % reqcnt)
1381 self.ui.debug(_("%d total queries\n") % reqcnt)
1383
1382
1384 return fetch.keys()
1383 return fetch.keys()
1385
1384
1386 def findoutgoing(self, remote, base=None, heads=None, force=False):
1385 def findoutgoing(self, remote, base=None, heads=None, force=False):
1387 """Return list of nodes that are roots of subsets not in remote
1386 """Return list of nodes that are roots of subsets not in remote
1388
1387
1389 If base dict is specified, assume that these nodes and their parents
1388 If base dict is specified, assume that these nodes and their parents
1390 exist on the remote side.
1389 exist on the remote side.
1391 If a list of heads is specified, return only nodes which are heads
1390 If a list of heads is specified, return only nodes which are heads
1392 or ancestors of these heads, and return a second element which
1391 or ancestors of these heads, and return a second element which
1393 contains all remote heads which get new children.
1392 contains all remote heads which get new children.
1394 """
1393 """
1395 if base == None:
1394 if base == None:
1396 base = {}
1395 base = {}
1397 self.findincoming(remote, base, heads, force=force)
1396 self.findincoming(remote, base, heads, force=force)
1398
1397
1399 self.ui.debug(_("common changesets up to ")
1398 self.ui.debug(_("common changesets up to ")
1400 + " ".join(map(short, base.keys())) + "\n")
1399 + " ".join(map(short, base.keys())) + "\n")
1401
1400
1402 remain = dict.fromkeys(self.changelog.nodemap)
1401 remain = dict.fromkeys(self.changelog.nodemap)
1403
1402
1404 # prune everything remote has from the tree
1403 # prune everything remote has from the tree
1405 del remain[nullid]
1404 del remain[nullid]
1406 remove = base.keys()
1405 remove = base.keys()
1407 while remove:
1406 while remove:
1408 n = remove.pop(0)
1407 n = remove.pop(0)
1409 if n in remain:
1408 if n in remain:
1410 del remain[n]
1409 del remain[n]
1411 for p in self.changelog.parents(n):
1410 for p in self.changelog.parents(n):
1412 remove.append(p)
1411 remove.append(p)
1413
1412
1414 # find every node whose parents have been pruned
1413 # find every node whose parents have been pruned
1415 subset = []
1414 subset = []
1416 # find every remote head that will get new children
1415 # find every remote head that will get new children
1417 updated_heads = {}
1416 updated_heads = {}
1418 for n in remain:
1417 for n in remain:
1419 p1, p2 = self.changelog.parents(n)
1418 p1, p2 = self.changelog.parents(n)
1420 if p1 not in remain and p2 not in remain:
1419 if p1 not in remain and p2 not in remain:
1421 subset.append(n)
1420 subset.append(n)
1422 if heads:
1421 if heads:
1423 if p1 in heads:
1422 if p1 in heads:
1424 updated_heads[p1] = True
1423 updated_heads[p1] = True
1425 if p2 in heads:
1424 if p2 in heads:
1426 updated_heads[p2] = True
1425 updated_heads[p2] = True
1427
1426
1428 # this is the set of all roots we have to push
1427 # this is the set of all roots we have to push
1429 if heads:
1428 if heads:
1430 return subset, updated_heads.keys()
1429 return subset, updated_heads.keys()
1431 else:
1430 else:
1432 return subset
1431 return subset
1433
1432
1434 def pull(self, remote, heads=None, force=False):
1433 def pull(self, remote, heads=None, force=False):
1435 lock = self.lock()
1434 lock = self.lock()
1436 try:
1435 try:
1437 fetch = self.findincoming(remote, heads=heads, force=force)
1436 fetch = self.findincoming(remote, heads=heads, force=force)
1438 if fetch == [nullid]:
1437 if fetch == [nullid]:
1439 self.ui.status(_("requesting all changes\n"))
1438 self.ui.status(_("requesting all changes\n"))
1440
1439
1441 if not fetch:
1440 if not fetch:
1442 self.ui.status(_("no changes found\n"))
1441 self.ui.status(_("no changes found\n"))
1443 return 0
1442 return 0
1444
1443
1445 if heads is None:
1444 if heads is None:
1446 cg = remote.changegroup(fetch, 'pull')
1445 cg = remote.changegroup(fetch, 'pull')
1447 else:
1446 else:
1448 if 'changegroupsubset' not in remote.capabilities:
1447 if 'changegroupsubset' not in remote.capabilities:
1449 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1448 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1450 cg = remote.changegroupsubset(fetch, heads, 'pull')
1449 cg = remote.changegroupsubset(fetch, heads, 'pull')
1451 return self.addchangegroup(cg, 'pull', remote.url())
1450 return self.addchangegroup(cg, 'pull', remote.url())
1452 finally:
1451 finally:
1453 del lock
1452 del lock
1454
1453
1455 def push(self, remote, force=False, revs=None):
1454 def push(self, remote, force=False, revs=None):
1456 # there are two ways to push to remote repo:
1455 # there are two ways to push to remote repo:
1457 #
1456 #
1458 # addchangegroup assumes local user can lock remote
1457 # addchangegroup assumes local user can lock remote
1459 # repo (local filesystem, old ssh servers).
1458 # repo (local filesystem, old ssh servers).
1460 #
1459 #
1461 # unbundle assumes local user cannot lock remote repo (new ssh
1460 # unbundle assumes local user cannot lock remote repo (new ssh
1462 # servers, http servers).
1461 # servers, http servers).
1463
1462
1464 if remote.capable('unbundle'):
1463 if remote.capable('unbundle'):
1465 return self.push_unbundle(remote, force, revs)
1464 return self.push_unbundle(remote, force, revs)
1466 return self.push_addchangegroup(remote, force, revs)
1465 return self.push_addchangegroup(remote, force, revs)
1467
1466
1468 def prepush(self, remote, force, revs):
1467 def prepush(self, remote, force, revs):
1469 base = {}
1468 base = {}
1470 remote_heads = remote.heads()
1469 remote_heads = remote.heads()
1471 inc = self.findincoming(remote, base, remote_heads, force=force)
1470 inc = self.findincoming(remote, base, remote_heads, force=force)
1472
1471
1473 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1472 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1474 if revs is not None:
1473 if revs is not None:
1475 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1474 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1476 else:
1475 else:
1477 bases, heads = update, self.changelog.heads()
1476 bases, heads = update, self.changelog.heads()
1478
1477
1479 if not bases:
1478 if not bases:
1480 self.ui.status(_("no changes found\n"))
1479 self.ui.status(_("no changes found\n"))
1481 return None, 1
1480 return None, 1
1482 elif not force:
1481 elif not force:
1483 # check if we're creating new remote heads
1482 # check if we're creating new remote heads
1484 # to be a remote head after push, node must be either
1483 # to be a remote head after push, node must be either
1485 # - unknown locally
1484 # - unknown locally
1486 # - a local outgoing head descended from update
1485 # - a local outgoing head descended from update
1487 # - a remote head that's known locally and not
1486 # - a remote head that's known locally and not
1488 # ancestral to an outgoing head
1487 # ancestral to an outgoing head
1489
1488
1490 warn = 0
1489 warn = 0
1491
1490
1492 if remote_heads == [nullid]:
1491 if remote_heads == [nullid]:
1493 warn = 0
1492 warn = 0
1494 elif not revs and len(heads) > len(remote_heads):
1493 elif not revs and len(heads) > len(remote_heads):
1495 warn = 1
1494 warn = 1
1496 else:
1495 else:
1497 newheads = list(heads)
1496 newheads = list(heads)
1498 for r in remote_heads:
1497 for r in remote_heads:
1499 if r in self.changelog.nodemap:
1498 if r in self.changelog.nodemap:
1500 desc = self.changelog.heads(r, heads)
1499 desc = self.changelog.heads(r, heads)
1501 l = [h for h in heads if h in desc]
1500 l = [h for h in heads if h in desc]
1502 if not l:
1501 if not l:
1503 newheads.append(r)
1502 newheads.append(r)
1504 else:
1503 else:
1505 newheads.append(r)
1504 newheads.append(r)
1506 if len(newheads) > len(remote_heads):
1505 if len(newheads) > len(remote_heads):
1507 warn = 1
1506 warn = 1
1508
1507
1509 if warn:
1508 if warn:
1510 self.ui.warn(_("abort: push creates new remote branches!\n"))
1509 self.ui.warn(_("abort: push creates new remote branches!\n"))
1511 self.ui.status(_("(did you forget to merge?"
1510 self.ui.status(_("(did you forget to merge?"
1512 " use push -f to force)\n"))
1511 " use push -f to force)\n"))
1513 return None, 1
1512 return None, 1
1514 elif inc:
1513 elif inc:
1515 self.ui.warn(_("note: unsynced remote changes!\n"))
1514 self.ui.warn(_("note: unsynced remote changes!\n"))
1516
1515
1517
1516
1518 if revs is None:
1517 if revs is None:
1519 cg = self.changegroup(update, 'push')
1518 cg = self.changegroup(update, 'push')
1520 else:
1519 else:
1521 cg = self.changegroupsubset(update, revs, 'push')
1520 cg = self.changegroupsubset(update, revs, 'push')
1522 return cg, remote_heads
1521 return cg, remote_heads
1523
1522
1524 def push_addchangegroup(self, remote, force, revs):
1523 def push_addchangegroup(self, remote, force, revs):
1525 lock = remote.lock()
1524 lock = remote.lock()
1526 try:
1525 try:
1527 ret = self.prepush(remote, force, revs)
1526 ret = self.prepush(remote, force, revs)
1528 if ret[0] is not None:
1527 if ret[0] is not None:
1529 cg, remote_heads = ret
1528 cg, remote_heads = ret
1530 return remote.addchangegroup(cg, 'push', self.url())
1529 return remote.addchangegroup(cg, 'push', self.url())
1531 return ret[1]
1530 return ret[1]
1532 finally:
1531 finally:
1533 del lock
1532 del lock
1534
1533
1535 def push_unbundle(self, remote, force, revs):
1534 def push_unbundle(self, remote, force, revs):
1536 # local repo finds heads on server, finds out what revs it
1535 # local repo finds heads on server, finds out what revs it
1537 # must push. once revs transferred, if server finds it has
1536 # must push. once revs transferred, if server finds it has
1538 # different heads (someone else won commit/push race), server
1537 # different heads (someone else won commit/push race), server
1539 # aborts.
1538 # aborts.
1540
1539
1541 ret = self.prepush(remote, force, revs)
1540 ret = self.prepush(remote, force, revs)
1542 if ret[0] is not None:
1541 if ret[0] is not None:
1543 cg, remote_heads = ret
1542 cg, remote_heads = ret
1544 if force: remote_heads = ['force']
1543 if force: remote_heads = ['force']
1545 return remote.unbundle(cg, remote_heads, 'push')
1544 return remote.unbundle(cg, remote_heads, 'push')
1546 return ret[1]
1545 return ret[1]
1547
1546
1548 def changegroupinfo(self, nodes, source):
1547 def changegroupinfo(self, nodes, source):
1549 if self.ui.verbose or source == 'bundle':
1548 if self.ui.verbose or source == 'bundle':
1550 self.ui.status(_("%d changesets found\n") % len(nodes))
1549 self.ui.status(_("%d changesets found\n") % len(nodes))
1551 if self.ui.debugflag:
1550 if self.ui.debugflag:
1552 self.ui.debug(_("List of changesets:\n"))
1551 self.ui.debug(_("List of changesets:\n"))
1553 for node in nodes:
1552 for node in nodes:
1554 self.ui.debug("%s\n" % hex(node))
1553 self.ui.debug("%s\n" % hex(node))
1555
1554
1556 def changegroupsubset(self, bases, heads, source, extranodes=None):
1555 def changegroupsubset(self, bases, heads, source, extranodes=None):
1557 """This function generates a changegroup consisting of all the nodes
1556 """This function generates a changegroup consisting of all the nodes
1558 that are descendents of any of the bases, and ancestors of any of
1557 that are descendents of any of the bases, and ancestors of any of
1559 the heads.
1558 the heads.
1560
1559
1561 It is fairly complex as determining which filenodes and which
1560 It is fairly complex as determining which filenodes and which
1562 manifest nodes need to be included for the changeset to be complete
1561 manifest nodes need to be included for the changeset to be complete
1563 is non-trivial.
1562 is non-trivial.
1564
1563
1565 Another wrinkle is doing the reverse, figuring out which changeset in
1564 Another wrinkle is doing the reverse, figuring out which changeset in
1566 the changegroup a particular filenode or manifestnode belongs to.
1565 the changegroup a particular filenode or manifestnode belongs to.
1567
1566
1568 The caller can specify some nodes that must be included in the
1567 The caller can specify some nodes that must be included in the
1569 changegroup using the extranodes argument. It should be a dict
1568 changegroup using the extranodes argument. It should be a dict
1570 where the keys are the filenames (or 1 for the manifest), and the
1569 where the keys are the filenames (or 1 for the manifest), and the
1571 values are lists of (node, linknode) tuples, where node is a wanted
1570 values are lists of (node, linknode) tuples, where node is a wanted
1572 node and linknode is the changelog node that should be transmitted as
1571 node and linknode is the changelog node that should be transmitted as
1573 the linkrev.
1572 the linkrev.
1574 """
1573 """
1575
1574
1576 self.hook('preoutgoing', throw=True, source=source)
1575 self.hook('preoutgoing', throw=True, source=source)
1577
1576
1578 # Set up some initial variables
1577 # Set up some initial variables
1579 # Make it easy to refer to self.changelog
1578 # Make it easy to refer to self.changelog
1580 cl = self.changelog
1579 cl = self.changelog
1581 # msng is short for missing - compute the list of changesets in this
1580 # msng is short for missing - compute the list of changesets in this
1582 # changegroup.
1581 # changegroup.
1583 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1582 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1584 self.changegroupinfo(msng_cl_lst, source)
1583 self.changegroupinfo(msng_cl_lst, source)
1585 # Some bases may turn out to be superfluous, and some heads may be
1584 # Some bases may turn out to be superfluous, and some heads may be
1586 # too. nodesbetween will return the minimal set of bases and heads
1585 # too. nodesbetween will return the minimal set of bases and heads
1587 # necessary to re-create the changegroup.
1586 # necessary to re-create the changegroup.
1588
1587
1589 # Known heads are the list of heads that it is assumed the recipient
1588 # Known heads are the list of heads that it is assumed the recipient
1590 # of this changegroup will know about.
1589 # of this changegroup will know about.
1591 knownheads = {}
1590 knownheads = {}
1592 # We assume that all parents of bases are known heads.
1591 # We assume that all parents of bases are known heads.
1593 for n in bases:
1592 for n in bases:
1594 for p in cl.parents(n):
1593 for p in cl.parents(n):
1595 if p != nullid:
1594 if p != nullid:
1596 knownheads[p] = 1
1595 knownheads[p] = 1
1597 knownheads = knownheads.keys()
1596 knownheads = knownheads.keys()
1598 if knownheads:
1597 if knownheads:
1599 # Now that we know what heads are known, we can compute which
1598 # Now that we know what heads are known, we can compute which
1600 # changesets are known. The recipient must know about all
1599 # changesets are known. The recipient must know about all
1601 # changesets required to reach the known heads from the null
1600 # changesets required to reach the known heads from the null
1602 # changeset.
1601 # changeset.
1603 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1602 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1604 junk = None
1603 junk = None
1605 # Transform the list into an ersatz set.
1604 # Transform the list into an ersatz set.
1606 has_cl_set = dict.fromkeys(has_cl_set)
1605 has_cl_set = dict.fromkeys(has_cl_set)
1607 else:
1606 else:
1608 # If there were no known heads, the recipient cannot be assumed to
1607 # If there were no known heads, the recipient cannot be assumed to
1609 # know about any changesets.
1608 # know about any changesets.
1610 has_cl_set = {}
1609 has_cl_set = {}
1611
1610
1612 # Make it easy to refer to self.manifest
1611 # Make it easy to refer to self.manifest
1613 mnfst = self.manifest
1612 mnfst = self.manifest
1614 # We don't know which manifests are missing yet
1613 # We don't know which manifests are missing yet
1615 msng_mnfst_set = {}
1614 msng_mnfst_set = {}
1616 # Nor do we know which filenodes are missing.
1615 # Nor do we know which filenodes are missing.
1617 msng_filenode_set = {}
1616 msng_filenode_set = {}
1618
1617
1619 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1618 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1620 junk = None
1619 junk = None
1621
1620
1622 # A changeset always belongs to itself, so the changenode lookup
1621 # A changeset always belongs to itself, so the changenode lookup
1623 # function for a changenode is identity.
1622 # function for a changenode is identity.
1624 def identity(x):
1623 def identity(x):
1625 return x
1624 return x
1626
1625
1627 # A function generating function. Sets up an environment for the
1626 # A function generating function. Sets up an environment for the
1628 # inner function.
1627 # inner function.
1629 def cmp_by_rev_func(revlog):
1628 def cmp_by_rev_func(revlog):
1630 # Compare two nodes by their revision number in the environment's
1629 # Compare two nodes by their revision number in the environment's
1631 # revision history. Since the revision number both represents the
1630 # revision history. Since the revision number both represents the
1632 # most efficient order to read the nodes in, and represents a
1631 # most efficient order to read the nodes in, and represents a
1633 # topological sorting of the nodes, this function is often useful.
1632 # topological sorting of the nodes, this function is often useful.
1634 def cmp_by_rev(a, b):
1633 def cmp_by_rev(a, b):
1635 return cmp(revlog.rev(a), revlog.rev(b))
1634 return cmp(revlog.rev(a), revlog.rev(b))
1636 return cmp_by_rev
1635 return cmp_by_rev
1637
1636
1638 # If we determine that a particular file or manifest node must be a
1637 # If we determine that a particular file or manifest node must be a
1639 # node that the recipient of the changegroup will already have, we can
1638 # node that the recipient of the changegroup will already have, we can
1640 # also assume the recipient will have all the parents. This function
1639 # also assume the recipient will have all the parents. This function
1641 # prunes them from the set of missing nodes.
1640 # prunes them from the set of missing nodes.
1642 def prune_parents(revlog, hasset, msngset):
1641 def prune_parents(revlog, hasset, msngset):
1643 haslst = hasset.keys()
1642 haslst = hasset.keys()
1644 haslst.sort(cmp_by_rev_func(revlog))
1643 haslst.sort(cmp_by_rev_func(revlog))
1645 for node in haslst:
1644 for node in haslst:
1646 parentlst = [p for p in revlog.parents(node) if p != nullid]
1645 parentlst = [p for p in revlog.parents(node) if p != nullid]
1647 while parentlst:
1646 while parentlst:
1648 n = parentlst.pop()
1647 n = parentlst.pop()
1649 if n not in hasset:
1648 if n not in hasset:
1650 hasset[n] = 1
1649 hasset[n] = 1
1651 p = [p for p in revlog.parents(n) if p != nullid]
1650 p = [p for p in revlog.parents(n) if p != nullid]
1652 parentlst.extend(p)
1651 parentlst.extend(p)
1653 for n in hasset:
1652 for n in hasset:
1654 msngset.pop(n, None)
1653 msngset.pop(n, None)
1655
1654
1656 # This is a function generating function used to set up an environment
1655 # This is a function generating function used to set up an environment
1657 # for the inner function to execute in.
1656 # for the inner function to execute in.
1658 def manifest_and_file_collector(changedfileset):
1657 def manifest_and_file_collector(changedfileset):
1659 # This is an information gathering function that gathers
1658 # This is an information gathering function that gathers
1660 # information from each changeset node that goes out as part of
1659 # information from each changeset node that goes out as part of
1661 # the changegroup. The information gathered is a list of which
1660 # the changegroup. The information gathered is a list of which
1662 # manifest nodes are potentially required (the recipient may
1661 # manifest nodes are potentially required (the recipient may
1663 # already have them) and total list of all files which were
1662 # already have them) and total list of all files which were
1664 # changed in any changeset in the changegroup.
1663 # changed in any changeset in the changegroup.
1665 #
1664 #
1666 # We also remember the first changenode we saw any manifest
1665 # We also remember the first changenode we saw any manifest
1667 # referenced by so we can later determine which changenode 'owns'
1666 # referenced by so we can later determine which changenode 'owns'
1668 # the manifest.
1667 # the manifest.
1669 def collect_manifests_and_files(clnode):
1668 def collect_manifests_and_files(clnode):
1670 c = cl.read(clnode)
1669 c = cl.read(clnode)
1671 for f in c[3]:
1670 for f in c[3]:
1672 # This is to make sure we only have one instance of each
1671 # This is to make sure we only have one instance of each
1673 # filename string for each filename.
1672 # filename string for each filename.
1674 changedfileset.setdefault(f, f)
1673 changedfileset.setdefault(f, f)
1675 msng_mnfst_set.setdefault(c[0], clnode)
1674 msng_mnfst_set.setdefault(c[0], clnode)
1676 return collect_manifests_and_files
1675 return collect_manifests_and_files
1677
1676
1678 # Figure out which manifest nodes (of the ones we think might be part
1677 # Figure out which manifest nodes (of the ones we think might be part
1679 # of the changegroup) the recipient must know about and remove them
1678 # of the changegroup) the recipient must know about and remove them
1680 # from the changegroup.
1679 # from the changegroup.
1681 def prune_manifests():
1680 def prune_manifests():
1682 has_mnfst_set = {}
1681 has_mnfst_set = {}
1683 for n in msng_mnfst_set:
1682 for n in msng_mnfst_set:
1684 # If a 'missing' manifest thinks it belongs to a changenode
1683 # If a 'missing' manifest thinks it belongs to a changenode
1685 # the recipient is assumed to have, obviously the recipient
1684 # the recipient is assumed to have, obviously the recipient
1686 # must have that manifest.
1685 # must have that manifest.
1687 linknode = cl.node(mnfst.linkrev(n))
1686 linknode = cl.node(mnfst.linkrev(n))
1688 if linknode in has_cl_set:
1687 if linknode in has_cl_set:
1689 has_mnfst_set[n] = 1
1688 has_mnfst_set[n] = 1
1690 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1689 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1691
1690
1692 # Use the information collected in collect_manifests_and_files to say
1691 # Use the information collected in collect_manifests_and_files to say
1693 # which changenode any manifestnode belongs to.
1692 # which changenode any manifestnode belongs to.
1694 def lookup_manifest_link(mnfstnode):
1693 def lookup_manifest_link(mnfstnode):
1695 return msng_mnfst_set[mnfstnode]
1694 return msng_mnfst_set[mnfstnode]
1696
1695
1697 # A function generating function that sets up the initial environment
1696 # A function generating function that sets up the initial environment
1698 # the inner function.
1697 # the inner function.
1699 def filenode_collector(changedfiles):
1698 def filenode_collector(changedfiles):
1700 next_rev = [0]
1699 next_rev = [0]
1701 # This gathers information from each manifestnode included in the
1700 # This gathers information from each manifestnode included in the
1702 # changegroup about which filenodes the manifest node references
1701 # changegroup about which filenodes the manifest node references
1703 # so we can include those in the changegroup too.
1702 # so we can include those in the changegroup too.
1704 #
1703 #
1705 # It also remembers which changenode each filenode belongs to. It
1704 # It also remembers which changenode each filenode belongs to. It
1706 # does this by assuming the a filenode belongs to the changenode
1705 # does this by assuming the a filenode belongs to the changenode
1707 # the first manifest that references it belongs to.
1706 # the first manifest that references it belongs to.
1708 def collect_msng_filenodes(mnfstnode):
1707 def collect_msng_filenodes(mnfstnode):
1709 r = mnfst.rev(mnfstnode)
1708 r = mnfst.rev(mnfstnode)
1710 if r == next_rev[0]:
1709 if r == next_rev[0]:
1711 # If the last rev we looked at was the one just previous,
1710 # If the last rev we looked at was the one just previous,
1712 # we only need to see a diff.
1711 # we only need to see a diff.
1713 deltamf = mnfst.readdelta(mnfstnode)
1712 deltamf = mnfst.readdelta(mnfstnode)
1714 # For each line in the delta
1713 # For each line in the delta
1715 for f, fnode in deltamf.items():
1714 for f, fnode in deltamf.items():
1716 f = changedfiles.get(f, None)
1715 f = changedfiles.get(f, None)
1717 # And if the file is in the list of files we care
1716 # And if the file is in the list of files we care
1718 # about.
1717 # about.
1719 if f is not None:
1718 if f is not None:
1720 # Get the changenode this manifest belongs to
1719 # Get the changenode this manifest belongs to
1721 clnode = msng_mnfst_set[mnfstnode]
1720 clnode = msng_mnfst_set[mnfstnode]
1722 # Create the set of filenodes for the file if
1721 # Create the set of filenodes for the file if
1723 # there isn't one already.
1722 # there isn't one already.
1724 ndset = msng_filenode_set.setdefault(f, {})
1723 ndset = msng_filenode_set.setdefault(f, {})
1725 # And set the filenode's changelog node to the
1724 # And set the filenode's changelog node to the
1726 # manifest's if it hasn't been set already.
1725 # manifest's if it hasn't been set already.
1727 ndset.setdefault(fnode, clnode)
1726 ndset.setdefault(fnode, clnode)
1728 else:
1727 else:
1729 # Otherwise we need a full manifest.
1728 # Otherwise we need a full manifest.
1730 m = mnfst.read(mnfstnode)
1729 m = mnfst.read(mnfstnode)
1731 # For every file in we care about.
1730 # For every file in we care about.
1732 for f in changedfiles:
1731 for f in changedfiles:
1733 fnode = m.get(f, None)
1732 fnode = m.get(f, None)
1734 # If it's in the manifest
1733 # If it's in the manifest
1735 if fnode is not None:
1734 if fnode is not None:
1736 # See comments above.
1735 # See comments above.
1737 clnode = msng_mnfst_set[mnfstnode]
1736 clnode = msng_mnfst_set[mnfstnode]
1738 ndset = msng_filenode_set.setdefault(f, {})
1737 ndset = msng_filenode_set.setdefault(f, {})
1739 ndset.setdefault(fnode, clnode)
1738 ndset.setdefault(fnode, clnode)
1740 # Remember the revision we hope to see next.
1739 # Remember the revision we hope to see next.
1741 next_rev[0] = r + 1
1740 next_rev[0] = r + 1
1742 return collect_msng_filenodes
1741 return collect_msng_filenodes
1743
1742
1744 # We have a list of filenodes we think we need for a file, lets remove
1743 # We have a list of filenodes we think we need for a file, lets remove
1745 # all those we now the recipient must have.
1744 # all those we now the recipient must have.
1746 def prune_filenodes(f, filerevlog):
1745 def prune_filenodes(f, filerevlog):
1747 msngset = msng_filenode_set[f]
1746 msngset = msng_filenode_set[f]
1748 hasset = {}
1747 hasset = {}
1749 # If a 'missing' filenode thinks it belongs to a changenode we
1748 # If a 'missing' filenode thinks it belongs to a changenode we
1750 # assume the recipient must have, then the recipient must have
1749 # assume the recipient must have, then the recipient must have
1751 # that filenode.
1750 # that filenode.
1752 for n in msngset:
1751 for n in msngset:
1753 clnode = cl.node(filerevlog.linkrev(n))
1752 clnode = cl.node(filerevlog.linkrev(n))
1754 if clnode in has_cl_set:
1753 if clnode in has_cl_set:
1755 hasset[n] = 1
1754 hasset[n] = 1
1756 prune_parents(filerevlog, hasset, msngset)
1755 prune_parents(filerevlog, hasset, msngset)
1757
1756
1758 # A function generator function that sets up the a context for the
1757 # A function generator function that sets up the a context for the
1759 # inner function.
1758 # inner function.
1760 def lookup_filenode_link_func(fname):
1759 def lookup_filenode_link_func(fname):
1761 msngset = msng_filenode_set[fname]
1760 msngset = msng_filenode_set[fname]
1762 # Lookup the changenode the filenode belongs to.
1761 # Lookup the changenode the filenode belongs to.
1763 def lookup_filenode_link(fnode):
1762 def lookup_filenode_link(fnode):
1764 return msngset[fnode]
1763 return msngset[fnode]
1765 return lookup_filenode_link
1764 return lookup_filenode_link
1766
1765
1767 # Add the nodes that were explicitly requested.
1766 # Add the nodes that were explicitly requested.
1768 def add_extra_nodes(name, nodes):
1767 def add_extra_nodes(name, nodes):
1769 if not extranodes or name not in extranodes:
1768 if not extranodes or name not in extranodes:
1770 return
1769 return
1771
1770
1772 for node, linknode in extranodes[name]:
1771 for node, linknode in extranodes[name]:
1773 if node not in nodes:
1772 if node not in nodes:
1774 nodes[node] = linknode
1773 nodes[node] = linknode
1775
1774
1776 # Now that we have all theses utility functions to help out and
1775 # Now that we have all theses utility functions to help out and
1777 # logically divide up the task, generate the group.
1776 # logically divide up the task, generate the group.
1778 def gengroup():
1777 def gengroup():
1779 # The set of changed files starts empty.
1778 # The set of changed files starts empty.
1780 changedfiles = {}
1779 changedfiles = {}
1781 # Create a changenode group generator that will call our functions
1780 # Create a changenode group generator that will call our functions
1782 # back to lookup the owning changenode and collect information.
1781 # back to lookup the owning changenode and collect information.
1783 group = cl.group(msng_cl_lst, identity,
1782 group = cl.group(msng_cl_lst, identity,
1784 manifest_and_file_collector(changedfiles))
1783 manifest_and_file_collector(changedfiles))
1785 for chnk in group:
1784 for chnk in group:
1786 yield chnk
1785 yield chnk
1787
1786
1788 # The list of manifests has been collected by the generator
1787 # The list of manifests has been collected by the generator
1789 # calling our functions back.
1788 # calling our functions back.
1790 prune_manifests()
1789 prune_manifests()
1791 add_extra_nodes(1, msng_mnfst_set)
1790 add_extra_nodes(1, msng_mnfst_set)
1792 msng_mnfst_lst = msng_mnfst_set.keys()
1791 msng_mnfst_lst = msng_mnfst_set.keys()
1793 # Sort the manifestnodes by revision number.
1792 # Sort the manifestnodes by revision number.
1794 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1793 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1795 # Create a generator for the manifestnodes that calls our lookup
1794 # Create a generator for the manifestnodes that calls our lookup
1796 # and data collection functions back.
1795 # and data collection functions back.
1797 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1796 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1798 filenode_collector(changedfiles))
1797 filenode_collector(changedfiles))
1799 for chnk in group:
1798 for chnk in group:
1800 yield chnk
1799 yield chnk
1801
1800
1802 # These are no longer needed, dereference and toss the memory for
1801 # These are no longer needed, dereference and toss the memory for
1803 # them.
1802 # them.
1804 msng_mnfst_lst = None
1803 msng_mnfst_lst = None
1805 msng_mnfst_set.clear()
1804 msng_mnfst_set.clear()
1806
1805
1807 if extranodes:
1806 if extranodes:
1808 for fname in extranodes:
1807 for fname in extranodes:
1809 if isinstance(fname, int):
1808 if isinstance(fname, int):
1810 continue
1809 continue
1811 add_extra_nodes(fname,
1810 add_extra_nodes(fname,
1812 msng_filenode_set.setdefault(fname, {}))
1811 msng_filenode_set.setdefault(fname, {}))
1813 changedfiles[fname] = 1
1812 changedfiles[fname] = 1
1814 changedfiles = changedfiles.keys()
1813 changedfiles = changedfiles.keys()
1815 changedfiles.sort()
1814 changedfiles.sort()
1816 # Go through all our files in order sorted by name.
1815 # Go through all our files in order sorted by name.
1817 for fname in changedfiles:
1816 for fname in changedfiles:
1818 filerevlog = self.file(fname)
1817 filerevlog = self.file(fname)
1819 if filerevlog.count() == 0:
1818 if filerevlog.count() == 0:
1820 raise util.Abort(_("empty or missing revlog for %s") % fname)
1819 raise util.Abort(_("empty or missing revlog for %s") % fname)
1821 # Toss out the filenodes that the recipient isn't really
1820 # Toss out the filenodes that the recipient isn't really
1822 # missing.
1821 # missing.
1823 if fname in msng_filenode_set:
1822 if fname in msng_filenode_set:
1824 prune_filenodes(fname, filerevlog)
1823 prune_filenodes(fname, filerevlog)
1825 msng_filenode_lst = msng_filenode_set[fname].keys()
1824 msng_filenode_lst = msng_filenode_set[fname].keys()
1826 else:
1825 else:
1827 msng_filenode_lst = []
1826 msng_filenode_lst = []
1828 # If any filenodes are left, generate the group for them,
1827 # If any filenodes are left, generate the group for them,
1829 # otherwise don't bother.
1828 # otherwise don't bother.
1830 if len(msng_filenode_lst) > 0:
1829 if len(msng_filenode_lst) > 0:
1831 yield changegroup.chunkheader(len(fname))
1830 yield changegroup.chunkheader(len(fname))
1832 yield fname
1831 yield fname
1833 # Sort the filenodes by their revision #
1832 # Sort the filenodes by their revision #
1834 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1833 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1835 # Create a group generator and only pass in a changenode
1834 # Create a group generator and only pass in a changenode
1836 # lookup function as we need to collect no information
1835 # lookup function as we need to collect no information
1837 # from filenodes.
1836 # from filenodes.
1838 group = filerevlog.group(msng_filenode_lst,
1837 group = filerevlog.group(msng_filenode_lst,
1839 lookup_filenode_link_func(fname))
1838 lookup_filenode_link_func(fname))
1840 for chnk in group:
1839 for chnk in group:
1841 yield chnk
1840 yield chnk
1842 if fname in msng_filenode_set:
1841 if fname in msng_filenode_set:
1843 # Don't need this anymore, toss it to free memory.
1842 # Don't need this anymore, toss it to free memory.
1844 del msng_filenode_set[fname]
1843 del msng_filenode_set[fname]
1845 # Signal that no more groups are left.
1844 # Signal that no more groups are left.
1846 yield changegroup.closechunk()
1845 yield changegroup.closechunk()
1847
1846
1848 if msng_cl_lst:
1847 if msng_cl_lst:
1849 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1848 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1850
1849
1851 return util.chunkbuffer(gengroup())
1850 return util.chunkbuffer(gengroup())
1852
1851
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them."""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        # Everything between basenodes and the heads is outgoing.
        nodes = cl.nodesbetween(basenodes, None)[0]
        # Outgoing changelog revision numbers, kept as dict keys for fast
        # membership tests in gennodelst (dict-as-set; predates set literals).
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            # For the changelog itself a node is its own link node.
            return x

        def gennodelst(revlog):
            # Yield revlog's nodes, in revision order, whose linkrev points
            # at one of the outgoing changesets.
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            # Build a callback that records every file touched by each
            # changeset while the changelog group is streamed out.
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                # c[3] is the changeset's list of modified files.
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            # Return a function mapping one of revlog's nodes to the
            # changelog node that introduced it.
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            # Changelog group first; collecting changed files as a side
            # effect of streaming it.
            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            # Then the manifest group.
            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            # Finally one group per changed file, sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                if filerevlog.count() == 0:
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    # Each file group is preceded by a chunk carrying the
                    # file name.
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            # Empty chunk signals the end of the stream.
            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1922
1921
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - less heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            # Link-revision callback for incoming changesets: each new
            # changeset's linkrev is the next changelog revision number.
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        def revmap(x):
            # Map an incoming node to its (already added) changelog revision.
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            # Weak proxy avoids a reference cycle so the transaction's
            # destructor (rollback on error) still runs; see aftertrans.
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            cor = cl.count() - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp, 1) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                # An empty chunk (falsy name) terminates the file groups.
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            # make changelog see real files again
            cl.finalize(trp)

            newheads = len(self.changelog.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                # pretxnchangegroup may veto the whole group before commit.
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(self.changelog.node(cor+1)), source=srctype,
                          url=url)

            tr.close()
        finally:
            # Dropping the last reference triggers rollback if tr.close()
            # was not reached.
            del tr

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug(_("updating the branch cache\n"))
            self.branchcache = None
            self.branchtags()
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
2026
2025
2027
2026
2028 def stream_in(self, remote):
2027 def stream_in(self, remote):
2029 fp = remote.stream_out()
2028 fp = remote.stream_out()
2030 l = fp.readline()
2029 l = fp.readline()
2031 try:
2030 try:
2032 resp = int(l)
2031 resp = int(l)
2033 except ValueError:
2032 except ValueError:
2034 raise util.UnexpectedOutput(
2033 raise util.UnexpectedOutput(
2035 _('Unexpected response from remote server:'), l)
2034 _('Unexpected response from remote server:'), l)
2036 if resp == 1:
2035 if resp == 1:
2037 raise util.Abort(_('operation forbidden by server'))
2036 raise util.Abort(_('operation forbidden by server'))
2038 elif resp == 2:
2037 elif resp == 2:
2039 raise util.Abort(_('locking the remote repository failed'))
2038 raise util.Abort(_('locking the remote repository failed'))
2040 elif resp != 0:
2039 elif resp != 0:
2041 raise util.Abort(_('the server sent an unknown error code'))
2040 raise util.Abort(_('the server sent an unknown error code'))
2042 self.ui.status(_('streaming all changes\n'))
2041 self.ui.status(_('streaming all changes\n'))
2043 l = fp.readline()
2042 l = fp.readline()
2044 try:
2043 try:
2045 total_files, total_bytes = map(int, l.split(' ', 1))
2044 total_files, total_bytes = map(int, l.split(' ', 1))
2046 except ValueError, TypeError:
2045 except ValueError, TypeError:
2047 raise util.UnexpectedOutput(
2046 raise util.UnexpectedOutput(
2048 _('Unexpected response from remote server:'), l)
2047 _('Unexpected response from remote server:'), l)
2049 self.ui.status(_('%d files to transfer, %s of data\n') %
2048 self.ui.status(_('%d files to transfer, %s of data\n') %
2050 (total_files, util.bytecount(total_bytes)))
2049 (total_files, util.bytecount(total_bytes)))
2051 start = time.time()
2050 start = time.time()
2052 for i in xrange(total_files):
2051 for i in xrange(total_files):
2053 # XXX doesn't support '\n' or '\r' in filenames
2052 # XXX doesn't support '\n' or '\r' in filenames
2054 l = fp.readline()
2053 l = fp.readline()
2055 try:
2054 try:
2056 name, size = l.split('\0', 1)
2055 name, size = l.split('\0', 1)
2057 size = int(size)
2056 size = int(size)
2058 except ValueError, TypeError:
2057 except ValueError, TypeError:
2059 raise util.UnexpectedOutput(
2058 raise util.UnexpectedOutput(
2060 _('Unexpected response from remote server:'), l)
2059 _('Unexpected response from remote server:'), l)
2061 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2060 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2062 ofp = self.sopener(name, 'w')
2061 ofp = self.sopener(name, 'w')
2063 for chunk in util.filechunkiter(fp, limit=size):
2062 for chunk in util.filechunkiter(fp, limit=size):
2064 ofp.write(chunk)
2063 ofp.write(chunk)
2065 ofp.close()
2064 ofp.close()
2066 elapsed = time.time() - start
2065 elapsed = time.time() - start
2067 if elapsed <= 0:
2066 if elapsed <= 0:
2068 elapsed = 0.001
2067 elapsed = 0.001
2069 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2068 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2070 (util.bytecount(total_bytes), elapsed,
2069 (util.bytecount(total_bytes), elapsed,
2071 util.bytecount(total_bytes / elapsed)))
2070 util.bytecount(total_bytes / elapsed)))
2072 self.invalidate()
2071 self.invalidate()
2073 return len(self.heads()) + 1
2072 return len(self.heads()) + 1
2074
2073
2075 def clone(self, remote, heads=[], stream=False):
2074 def clone(self, remote, heads=[], stream=False):
2076 '''clone remote repository.
2075 '''clone remote repository.
2077
2076
2078 keyword arguments:
2077 keyword arguments:
2079 heads: list of revs to clone (forces use of pull)
2078 heads: list of revs to clone (forces use of pull)
2080 stream: use streaming clone if possible'''
2079 stream: use streaming clone if possible'''
2081
2080
2082 # now, all clients that can request uncompressed clones can
2081 # now, all clients that can request uncompressed clones can
2083 # read repo formats supported by all servers that can serve
2082 # read repo formats supported by all servers that can serve
2084 # them.
2083 # them.
2085
2084
2086 # if revlog format changes, client will have to check version
2085 # if revlog format changes, client will have to check version
2087 # and format flags on "stream" capability, and use
2086 # and format flags on "stream" capability, and use
2088 # uncompressed only if compatible.
2087 # uncompressed only if compatible.
2089
2088
2090 if stream and not heads and remote.capable('stream'):
2089 if stream and not heads and remote.capable('stream'):
2091 return self.stream_in(remote)
2090 return self.stream_in(remote)
2092 return self.pull(remote, heads)
2091 return self.pull(remote, heads)
2093
2092
2094 # used to avoid circular references so destructors work
2093 # used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that performs the queued (src, dest) renames.

    The pairs are snapshotted immediately as tuples so the returned
    closure holds no reference back to the caller's sequence (used to
    avoid circular references so destructors work).
    """
    pending = list(map(tuple, files))
    def a():
        for src, dest in pending:
            util.rename(src, dest)
    return a
2101
2100
def instance(ui, path, create):
    # Repository factory entry point for local paths: strip any leading
    # 'file:' scheme and open (or create) a localrepository there.
    return localrepository(ui, util.drop_scheme('file', path), create)
2104
2103
def islocal(path):
    """Report whether *path* names a local repository.

    Always True for this backend; *path* is accepted only to satisfy the
    common repository-module interface.
    """
    return True
General Comments 0
You need to be logged in to leave comments. Login now