##// END OF EJS Templates
merge with crew
Thomas Arendsen Hein -
r6126:11a09d57 merge default
parent child Browse files
Show More
@@ -1,2350 +1,2347
1 # queue.py - patch queues for mercurial
1 # queue.py - patch queues for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 '''patch management and development
8 '''patch management and development
9
9
10 This extension lets you work with a stack of patches in a Mercurial
10 This extension lets you work with a stack of patches in a Mercurial
11 repository. It manages two stacks of patches - all known patches, and
11 repository. It manages two stacks of patches - all known patches, and
12 applied patches (subset of known patches).
12 applied patches (subset of known patches).
13
13
14 Known patches are represented as patch files in the .hg/patches
14 Known patches are represented as patch files in the .hg/patches
15 directory. Applied patches are both patch files and changesets.
15 directory. Applied patches are both patch files and changesets.
16
16
17 Common tasks (use "hg help command" for more details):
17 Common tasks (use "hg help command" for more details):
18
18
19 prepare repository to work with patches qinit
19 prepare repository to work with patches qinit
20 create new patch qnew
20 create new patch qnew
21 import existing patch qimport
21 import existing patch qimport
22
22
23 print patch series qseries
23 print patch series qseries
24 print applied patches qapplied
24 print applied patches qapplied
25 print name of top applied patch qtop
25 print name of top applied patch qtop
26
26
27 add known patch to applied stack qpush
27 add known patch to applied stack qpush
28 remove patch from applied stack qpop
28 remove patch from applied stack qpop
29 refresh contents of top applied patch qrefresh
29 refresh contents of top applied patch qrefresh
30 '''
30 '''
31
31
32 from mercurial.i18n import _
32 from mercurial.i18n import _
33 from mercurial import commands, cmdutil, hg, patch, revlog, util
33 from mercurial import commands, cmdutil, hg, patch, revlog, util
34 from mercurial import repair
34 from mercurial import repair
35 import os, sys, re, errno
35 import os, sys, re, errno
36
36
commands.norepo += " qclone"

# Patch names look like unix file names: they must be joinable with the
# queue directory to yield the on-disk patch path, so normalize them.
normname = util.normpath
42
42
class statusentry:
    """One entry of the applied-patch status file.

    An entry pairs a changeset hash (``rev``) with a patch name
    (``name``).  It can be built either from the two values directly,
    or by parsing a single ``"rev:name"`` line read back from the
    status file.
    """
    def __init__(self, rev, name=None):
        if name:
            self.rev, self.name = rev, name
        else:
            # parse a "rev:name" status-file line
            fields = rev.split(':', 1)
            if len(fields) == 2:
                self.rev, self.name = fields
            else:
                # malformed line: leave the entry empty
                self.rev = self.name = None

    def __str__(self):
        # inverse of the parsing constructor; used when the status
        # file is rewritten by save_dirty()
        return self.rev + ':' + self.name
56
56
57 class queue:
57 class queue:
    def __init__(self, ui, path, patchdir=None):
        """Initialize a patch queue rooted at *path*.

        ui: ui object used for output and configuration.
        path: base directory; patches live in its "patches"
            subdirectory unless *patchdir* overrides that.
        patchdir: optional explicit directory holding the patch files.
        """
        self.basepath = path
        self.path = patchdir or os.path.join(path, "patches")
        self.opener = util.opener(self.path)
        self.ui = ui
        self.applied = []        # statusentry list of applied patches
        self.full_series = []    # raw series file lines (comments/guards kept)
        self.applied_dirty = 0   # status file needs rewriting
        self.series_dirty = 0    # series file needs rewriting
        self.series_path = "series"
        self.status_path = "status"
        self.guards_path = "guards"
        self.active_guards = None   # lazily loaded by active()
        self.guards_dirty = False
        self._diffopts = None       # lazily computed by diffopts()

        if os.path.exists(self.join(self.series_path)):
            self.full_series = self.opener(self.series_path).read().splitlines()
        self.parse_series()

        if os.path.exists(self.join(self.status_path)):
            lines = self.opener(self.status_path).read().splitlines()
            # one "rev:name" line per applied patch
            self.applied = [statusentry(l) for l in lines]
81
81
82 def diffopts(self):
82 def diffopts(self):
83 if self._diffopts is None:
83 if self._diffopts is None:
84 self._diffopts = patch.diffopts(self.ui)
84 self._diffopts = patch.diffopts(self.ui)
85 return self._diffopts
85 return self._diffopts
86
86
87 def join(self, *p):
87 def join(self, *p):
88 return os.path.join(self.path, *p)
88 return os.path.join(self.path, *p)
89
89
90 def find_series(self, patch):
90 def find_series(self, patch):
91 pre = re.compile("(\s*)([^#]+)")
91 pre = re.compile("(\s*)([^#]+)")
92 index = 0
92 index = 0
93 for l in self.full_series:
93 for l in self.full_series:
94 m = pre.match(l)
94 m = pre.match(l)
95 if m:
95 if m:
96 s = m.group(2)
96 s = m.group(2)
97 s = s.rstrip()
97 s = s.rstrip()
98 if s == patch:
98 if s == patch:
99 return index
99 return index
100 index += 1
100 index += 1
101 return None
101 return None
102
102
    # Guards appear in series-file comments as "#+name" or "#-name"; a
    # guard name may not contain '#' or whitespace, nor start with +/-.
    guard_re = re.compile(r'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')

    def parse_series(self):
        """Rebuild self.series and self.series_guards from full_series.

        self.series receives the bare patch names (comments stripped);
        self.series_guards receives, for each patch, the list of guard
        tokens found in its trailing comment.  Aborts if a patch name
        occurs more than once.
        """
        self.series = []
        self.series_guards = []
        for l in self.full_series:
            h = l.find('#')
            if h == -1:
                # no comment on this line
                patch = l
                comment = ''
            elif h == 0:
                # whole line is a comment
                continue
            else:
                patch = l[:h]
                comment = l[h:]
            patch = patch.strip()
            if patch:
                if patch in self.series:
                    raise util.Abort(_('%s appears more than once in %s') %
                                     (patch, self.join(self.series_path)))
                self.series.append(patch)
                self.series_guards.append(self.guard_re.findall(comment))
125
125
126 def check_guard(self, guard):
126 def check_guard(self, guard):
127 bad_chars = '# \t\r\n\f'
127 bad_chars = '# \t\r\n\f'
128 first = guard[0]
128 first = guard[0]
129 for c in '-+':
129 for c in '-+':
130 if first == c:
130 if first == c:
131 return (_('guard %r starts with invalid character: %r') %
131 return (_('guard %r starts with invalid character: %r') %
132 (guard, c))
132 (guard, c))
133 for c in bad_chars:
133 for c in bad_chars:
134 if c in guard:
134 if c in guard:
135 return _('invalid character in guard %r: %r') % (guard, c)
135 return _('invalid character in guard %r: %r') % (guard, c)
136
136
    def set_active(self, guards):
        """Make *guards* the active guard set and mark it for saving.

        Every guard is validated with check_guard(); any bad guard
        aborts.  Duplicates are dropped and the result sorted before
        being recorded (written to disk later by save_dirty()).
        """
        for guard in guards:
            bad = self.check_guard(guard)
            if bad:
                raise util.Abort(bad)
        # de-duplicate via dict keys (Python 2: keys() is a plain
        # list, so in-place sort works)
        guards = dict.fromkeys(guards).keys()
        guards.sort()
        self.ui.debug('active guards: %s\n' % ' '.join(guards))
        self.active_guards = guards
        self.guards_dirty = True
147
147
    def active(self):
        """Return the list of active guards, loading it on first use.

        The guards file is read lazily; a missing file simply means no
        guards.  Invalid guards found in the file are warned about
        (with file:line position) and skipped rather than aborting.
        """
        if self.active_guards is None:
            self.active_guards = []
            try:
                guards = self.opener(self.guards_path).read().split()
            except IOError, err:
                # a missing guards file is fine; anything else is real
                if err.errno != errno.ENOENT: raise
                guards = []
            for i, guard in enumerate(guards):
                bad = self.check_guard(guard)
                if bad:
                    self.ui.warn('%s:%d: %s\n' %
                                 (self.join(self.guards_path), i + 1, bad))
                else:
                    self.active_guards.append(guard)
        return self.active_guards
164
164
    def set_guards(self, idx, guards):
        """Replace the guards on the series entry at index *idx*.

        Each element of *guards* must be '+name' or '-name'; the name
        part is validated with check_guard() and any problem aborts.
        The series line is rewritten with its old guard comments
        stripped and the new guards appended, then reparsed and marked
        dirty for save_dirty().
        """
        for g in guards:
            if len(g) < 2:
                raise util.Abort(_('guard %r too short') % g)
            if g[0] not in '-+':
                raise util.Abort(_('guard %r starts with invalid char') % g)
            bad = self.check_guard(g[1:])
            if bad:
                raise util.Abort(bad)
        # strip the existing guard comments, then append the new ones
        drop = self.guard_re.sub('', self.full_series[idx])
        self.full_series[idx] = drop + ''.join([' #' + g for g in guards])
        self.parse_series()
        self.series_dirty = True
178
178
    def pushable(self, idx):
        """Decide whether the patch at *idx* may be pushed.

        *idx* may be a series index or a patch name.  Returns a
        (pushable, reason) pair:
          (False, guard) - a negative guard matches an active guard
          (True, guard)  - a positive guard matches an active guard
          (False, pos)   - positive guards exist but none is active
          (True, '')     - the patch has guards but none apply
          (True, None)   - the patch has no guards at all
        """
        if isinstance(idx, str):
            idx = self.series.index(idx)
        patchguards = self.series_guards[idx]
        if not patchguards:
            return True, None
        default = False  # (unused; kept as in the original)
        guards = self.active()
        # a matching negative guard always blocks the push
        exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
        if exactneg:
            return False, exactneg[0]
        pos = [g for g in patchguards if g[0] == '+']
        exactpos = [g for g in pos if g[1:] in guards]
        if pos:
            if exactpos:
                return True, exactpos[0]
            # positive guards present, but none of them is active
            return False, pos
        return True, ''
197
197
    def explain_pushable(self, idx, all_patches=False):
        """Tell the user why the patch at *idx* is allowed or skipped.

        With all_patches, messages go to ui.write and allowed patches
        are explained as well; otherwise only skipped patches are
        reported via ui.warn, and only in verbose mode.  The wording
        mirrors the (pushable, why) cases returned by pushable().
        """
        write = all_patches and self.ui.write or self.ui.warn
        if all_patches or self.ui.verbose:
            if isinstance(idx, str):
                idx = self.series.index(idx)
            pushable, why = self.pushable(idx)
            if all_patches and pushable:
                if why is None:
                    write(_('allowing %s - no guards in effect\n') %
                          self.series[idx])
                else:
                    if not why:
                        write(_('allowing %s - no matching negative guards\n') %
                              self.series[idx])
                    else:
                        write(_('allowing %s - guarded by %r\n') %
                              (self.series[idx], why))
            if not pushable:
                if why:
                    write(_('skipping %s - guarded by %r\n') %
                          (self.series[idx], why))
                else:
                    write(_('skipping %s - no matching guards\n') %
                          self.series[idx])
222
222
223 def save_dirty(self):
223 def save_dirty(self):
224 def write_list(items, path):
224 def write_list(items, path):
225 fp = self.opener(path, 'w')
225 fp = self.opener(path, 'w')
226 for i in items:
226 for i in items:
227 fp.write("%s\n" % i)
227 fp.write("%s\n" % i)
228 fp.close()
228 fp.close()
229 if self.applied_dirty: write_list(map(str, self.applied), self.status_path)
229 if self.applied_dirty: write_list(map(str, self.applied), self.status_path)
230 if self.series_dirty: write_list(self.full_series, self.series_path)
230 if self.series_dirty: write_list(self.full_series, self.series_path)
231 if self.guards_dirty: write_list(self.active_guards, self.guards_path)
231 if self.guards_dirty: write_list(self.active_guards, self.guards_path)
232
232
    def readheaders(self, patch):
        """Parse the header of the patch file named *patch*.

        Returns (message, comments, user, date, diffstart > 1) where
        message is the list of commit-message lines, comments is every
        header line seen before the diff, user/date come from
        '# User'/'# Date' (hg export) or 'From:'/'Subject:' (mail)
        style tags, and the final flag is True when an actual diff body
        was found.
        """
        def eatdiff(lines):
            # drop trailing diff-introduction lines
            while lines:
                l = lines[-1]
                if (l.startswith("diff -") or
                    l.startswith("Index:") or
                    l.startswith("===========")):
                    del lines[-1]
                else:
                    break
        def eatempty(lines):
            # drop trailing blank lines
            while lines:
                l = lines[-1]
                if re.match('\s*$', l):
                    del lines[-1]
                else:
                    break

        pf = self.join(patch)
        message = []
        comments = []
        user = None
        date = None
        format = None    # parser state: None/"hgpatch"/"tag"/"tagdone"
        subject = None
        diffstart = 0    # 0: no diff, 1: saw "--- ", 2: confirmed diff

        for line in file(pf):
            line = line.rstrip()
            if line.startswith('diff --git'):
                diffstart = 2
                break
            if diffstart:
                # a "--- " line is only a diff start if "+++ " follows
                if line.startswith('+++ '):
                    diffstart = 2
                break
            if line.startswith("--- "):
                diffstart = 1
                continue
            elif format == "hgpatch":
                # parse values when importing the result of an hg export
                if line.startswith("# User "):
                    user = line[7:]
                elif line.startswith("# Date "):
                    date = line[7:]
                elif not line.startswith("# ") and line:
                    message.append(line)
                    format = None
            elif line == '# HG changeset patch':
                format = "hgpatch"
            elif (format != "tagdone" and (line.startswith("Subject: ") or
                                           line.startswith("subject: "))):
                subject = line[9:]
                format = "tag"
            elif (format != "tagdone" and (line.startswith("From: ") or
                                           line.startswith("from: "))):
                user = line[6:]
                format = "tag"
            elif format == "tag" and line == "":
                # when looking for tags (subject: from: etc) they
                # end once you find a blank line in the source
                format = "tagdone"
            elif message or line:
                message.append(line)
            comments.append(line)

        eatdiff(message)
        eatdiff(comments)
        eatempty(message)
        eatempty(comments)

        # make sure message isn't empty
        if format and format.startswith("tag") and subject:
            message.insert(0, "")
            message.insert(0, subject)
        return (message, comments, user, date, diffstart > 1)
309
309
    def removeundo(self, repo):
        """Delete the repository's undo file, if present.

        NOTE(review): presumably this keeps 'hg rollback' from undoing
        mq's own commits out from under the queue state - confirm.
        A failed unlink is only warned about, never fatal.
        """
        undo = repo.sjoin('undo')
        if not os.path.exists(undo):
            return
        try:
            os.unlink(undo)
        except OSError, inst:
            self.ui.warn('error removing undo: %s\n' % str(inst))
318
318
319 def printdiff(self, repo, node1, node2=None, files=None,
319 def printdiff(self, repo, node1, node2=None, files=None,
320 fp=None, changes=None, opts={}):
320 fp=None, changes=None, opts={}):
321 fns, matchfn, anypats = cmdutil.matchpats(repo, files, opts)
321 fns, matchfn, anypats = cmdutil.matchpats(repo, files, opts)
322
322
323 patch.diff(repo, node1, node2, fns, match=matchfn,
323 patch.diff(repo, node1, node2, fns, match=matchfn,
324 fp=fp, changes=changes, opts=self.diffopts())
324 fp=fp, changes=changes, opts=self.diffopts())
325
325
326 def mergeone(self, repo, mergeq, head, patch, rev):
326 def mergeone(self, repo, mergeq, head, patch, rev):
327 # first try just applying the patch
327 # first try just applying the patch
328 (err, n) = self.apply(repo, [ patch ], update_status=False,
328 (err, n) = self.apply(repo, [ patch ], update_status=False,
329 strict=True, merge=rev)
329 strict=True, merge=rev)
330
330
331 if err == 0:
331 if err == 0:
332 return (err, n)
332 return (err, n)
333
333
334 if n is None:
334 if n is None:
335 raise util.Abort(_("apply failed for patch %s") % patch)
335 raise util.Abort(_("apply failed for patch %s") % patch)
336
336
337 self.ui.warn("patch didn't work out, merging %s\n" % patch)
337 self.ui.warn("patch didn't work out, merging %s\n" % patch)
338
338
339 # apply failed, strip away that rev and merge.
339 # apply failed, strip away that rev and merge.
340 hg.clean(repo, head)
340 hg.clean(repo, head)
341 self.strip(repo, n, update=False, backup='strip')
341 self.strip(repo, n, update=False, backup='strip')
342
342
343 ctx = repo.changectx(rev)
343 ctx = repo.changectx(rev)
344 ret = hg.merge(repo, rev)
344 ret = hg.merge(repo, rev)
345 if ret:
345 if ret:
346 raise util.Abort(_("update returned %d") % ret)
346 raise util.Abort(_("update returned %d") % ret)
347 n = repo.commit(None, ctx.description(), ctx.user(), force=1)
347 n = repo.commit(None, ctx.description(), ctx.user(), force=1)
348 if n == None:
348 if n == None:
349 raise util.Abort(_("repo commit failed"))
349 raise util.Abort(_("repo commit failed"))
350 try:
350 try:
351 message, comments, user, date, patchfound = mergeq.readheaders(patch)
351 message, comments, user, date, patchfound = mergeq.readheaders(patch)
352 except:
352 except:
353 raise util.Abort(_("unable to read %s") % patch)
353 raise util.Abort(_("unable to read %s") % patch)
354
354
355 patchf = self.opener(patch, "w")
355 patchf = self.opener(patch, "w")
356 if comments:
356 if comments:
357 comments = "\n".join(comments) + '\n\n'
357 comments = "\n".join(comments) + '\n\n'
358 patchf.write(comments)
358 patchf.write(comments)
359 self.printdiff(repo, head, n, fp=patchf)
359 self.printdiff(repo, head, n, fp=patchf)
360 patchf.close()
360 patchf.close()
361 self.removeundo(repo)
361 self.removeundo(repo)
362 return (0, n)
362 return (0, n)
363
363
    def qparents(self, repo, rev=None):
        """Return the parent changeset mq should treat as *rev*'s base.

        With no rev: the working directory's first parent, unless the
        working dir sits on a merge, in which case the top applied
        patch (or None when nothing is applied).
        With a rev: its first parent, except at a merge, where
        whichever parent is in the applied stack wins.
        """
        if rev is None:
            (p1, p2) = repo.dirstate.parents()
            if p2 == revlog.nullid:
                return p1
            if len(self.applied) == 0:
                return None
            return revlog.bin(self.applied[-1].rev)
        pp = repo.changelog.parents(rev)
        if pp[1] != revlog.nullid:
            # merge: prefer the parent that is an applied patch;
            # statusentry.rev holds hex strings, so compare in hex
            arevs = [ x.rev for x in self.applied ]
            p0 = revlog.hex(pp[0])
            p1 = revlog.hex(pp[1])
            if p0 in arevs:
                return pp[0]
            if p1 in arevs:
                return pp[1]
        return pp[0]
382
382
    def mergepatch(self, repo, mergeq, series):
        """Merge the patches of *series* from queue *mergeq* into this
        queue, one at a time via mergeone().

        Returns an (err, head) pair; stops early when a patch is
        unknown or not applied in mergeq, or when mergeone() fails.
        Guarded (unpushable) patches are skipped with an explanation.
        """
        if len(self.applied) == 0:
            # each of the patches merged in will have two parents.  This
            # can confuse the qrefresh, qdiff, and strip code because it
            # needs to know which parent is actually in the patch queue.
            # so, we insert a merge marker with only one parent.  This way
            # the first patch in the queue is never a merge patch
            #
            pname = ".hg.patches.merge.marker"
            n = repo.commit(None, '[mq]: merge marker', user=None, force=1)
            self.removeundo(repo)
            self.applied.append(statusentry(revlog.hex(n), pname))
            self.applied_dirty = 1

        head = self.qparents(repo)

        for patch in series:
            patch = mergeq.lookup(patch, strict=True)
            if not patch:
                self.ui.warn("patch %s does not exist\n" % patch)
                return (1, None)
            pushable, reason = self.pushable(patch)
            if not pushable:
                self.explain_pushable(patch, all_patches=True)
                continue
            info = mergeq.isapplied(patch)
            if not info:
                self.ui.warn("patch %s is not applied\n" % patch)
                return (1, None)
            rev = revlog.bin(info[1])
            # head advances to the commit produced for this patch
            (err, head) = self.mergeone(repo, mergeq, head, patch, rev)
            if head:
                self.applied.append(statusentry(revlog.hex(head), patch))
                self.applied_dirty = 1
            if err:
                return (err, head)
        self.save_dirty()
        return (0, head)
421
421
    def patch(self, repo, patchfile):
        '''Apply patchfile to the working directory.
        patchfile: file name of patch'''
        # Returns (success, files, fuzz): whether the patch applied,
        # the dict of files it touched, and whether fuzz was needed.
        files = {}
        try:
            fuzz = patch.patch(patchfile, self.ui, strip=1, cwd=repo.root,
                               files=files)
        except Exception, inst:
            # details only in verbose mode; otherwise hint at -v
            self.ui.note(str(inst) + '\n')
            if not self.ui.verbose:
                self.ui.warn("patch failed, unable to continue (try -v)\n")
            return (False, files, False)

        return (True, files, fuzz)
436
436
437 def apply(self, repo, series, list=False, update_status=True,
437 def apply(self, repo, series, list=False, update_status=True,
438 strict=False, patchdir=None, merge=None, all_files={}):
438 strict=False, patchdir=None, merge=None, all_files={}):
439 wlock = lock = tr = None
439 wlock = lock = tr = None
440 try:
440 try:
441 wlock = repo.wlock()
441 wlock = repo.wlock()
442 lock = repo.lock()
442 lock = repo.lock()
443 tr = repo.transaction()
443 tr = repo.transaction()
444 try:
444 try:
445 ret = self._apply(repo, series, list, update_status,
445 ret = self._apply(repo, series, list, update_status,
446 strict, patchdir, merge, all_files=all_files)
446 strict, patchdir, merge, all_files=all_files)
447 tr.close()
447 tr.close()
448 self.save_dirty()
448 self.save_dirty()
449 return ret
449 return ret
450 except:
450 except:
451 try:
451 try:
452 tr.abort()
452 tr.abort()
453 finally:
453 finally:
454 repo.invalidate()
454 repo.invalidate()
455 repo.dirstate.invalidate()
455 repo.dirstate.invalidate()
456 raise
456 raise
457 finally:
457 finally:
458 del tr, lock, wlock
458 del tr, lock, wlock
459 self.removeundo(repo)
459 self.removeundo(repo)
460
460
461 def _apply(self, repo, series, list=False, update_status=True,
461 def _apply(self, repo, series, list=False, update_status=True,
462 strict=False, patchdir=None, merge=None, all_files={}):
462 strict=False, patchdir=None, merge=None, all_files={}):
463 # TODO unify with commands.py
463 # TODO unify with commands.py
464 if not patchdir:
464 if not patchdir:
465 patchdir = self.path
465 patchdir = self.path
466 err = 0
466 err = 0
467 n = None
467 n = None
468 for patchname in series:
468 for patchname in series:
469 pushable, reason = self.pushable(patchname)
469 pushable, reason = self.pushable(patchname)
470 if not pushable:
470 if not pushable:
471 self.explain_pushable(patchname, all_patches=True)
471 self.explain_pushable(patchname, all_patches=True)
472 continue
472 continue
473 self.ui.warn("applying %s\n" % patchname)
473 self.ui.warn("applying %s\n" % patchname)
474 pf = os.path.join(patchdir, patchname)
474 pf = os.path.join(patchdir, patchname)
475
475
476 try:
476 try:
477 message, comments, user, date, patchfound = self.readheaders(patchname)
477 message, comments, user, date, patchfound = self.readheaders(patchname)
478 except:
478 except:
479 self.ui.warn("Unable to read %s\n" % patchname)
479 self.ui.warn("Unable to read %s\n" % patchname)
480 err = 1
480 err = 1
481 break
481 break
482
482
483 if not message:
483 if not message:
484 message = "imported patch %s\n" % patchname
484 message = "imported patch %s\n" % patchname
485 else:
485 else:
486 if list:
486 if list:
487 message.append("\nimported patch %s" % patchname)
487 message.append("\nimported patch %s" % patchname)
488 message = '\n'.join(message)
488 message = '\n'.join(message)
489
489
490 (patcherr, files, fuzz) = self.patch(repo, pf)
490 (patcherr, files, fuzz) = self.patch(repo, pf)
491 all_files.update(files)
491 all_files.update(files)
492 patcherr = not patcherr
492 patcherr = not patcherr
493
493
494 if merge and files:
494 if merge and files:
495 # Mark as removed/merged and update dirstate parent info
495 # Mark as removed/merged and update dirstate parent info
496 removed = []
496 removed = []
497 merged = []
497 merged = []
498 for f in files:
498 for f in files:
499 if os.path.exists(repo.wjoin(f)):
499 if os.path.exists(repo.wjoin(f)):
500 merged.append(f)
500 merged.append(f)
501 else:
501 else:
502 removed.append(f)
502 removed.append(f)
503 for f in removed:
503 for f in removed:
504 repo.dirstate.remove(f)
504 repo.dirstate.remove(f)
505 for f in merged:
505 for f in merged:
506 repo.dirstate.merge(f)
506 repo.dirstate.merge(f)
507 p1, p2 = repo.dirstate.parents()
507 p1, p2 = repo.dirstate.parents()
508 repo.dirstate.setparents(p1, merge)
508 repo.dirstate.setparents(p1, merge)
509 files = patch.updatedir(self.ui, repo, files)
509 files = patch.updatedir(self.ui, repo, files)
510 n = repo.commit(files, message, user, date, force=1)
510 n = repo.commit(files, message, user, date, force=1)
511
511
512 if n == None:
512 if n == None:
513 raise util.Abort(_("repo commit failed"))
513 raise util.Abort(_("repo commit failed"))
514
514
515 if update_status:
515 if update_status:
516 self.applied.append(statusentry(revlog.hex(n), patchname))
516 self.applied.append(statusentry(revlog.hex(n), patchname))
517
517
518 if patcherr:
518 if patcherr:
519 if not patchfound:
519 if not patchfound:
520 self.ui.warn("patch %s is empty\n" % patchname)
520 self.ui.warn("patch %s is empty\n" % patchname)
521 err = 0
521 err = 0
522 else:
522 else:
523 self.ui.warn("patch failed, rejects left in working dir\n")
523 self.ui.warn("patch failed, rejects left in working dir\n")
524 err = 1
524 err = 1
525 break
525 break
526
526
527 if fuzz and strict:
527 if fuzz and strict:
528 self.ui.warn("fuzz found when applying patch, stopping\n")
528 self.ui.warn("fuzz found when applying patch, stopping\n")
529 err = 1
529 err = 1
530 break
530 break
531 return (err, n)
531 return (err, n)
532
532
    def delete(self, repo, patches, opts):
        """Remove patches from the series (qdelete).

        *patches* names unapplied patches.  With opts['rev'], applied
        revisions may be deleted too, but they must match the bottom
        of the applied stack in order.  Unless opts['keep'] is set the
        patch files themselves are removed as well.
        """
        if not patches and not opts.get('rev'):
            raise util.Abort(_('qdelete requires at least one revision or '
                               'patch name'))

        realpatches = []
        for patch in patches:
            patch = self.lookup(patch, strict=True)
            info = self.isapplied(patch)
            if info:
                raise util.Abort(_("cannot delete applied patch %s") % patch)
            if patch not in self.series:
                raise util.Abort(_("patch %s not in series file") % patch)
            realpatches.append(patch)

        appliedbase = 0
        if opts.get('rev'):
            if not self.applied:
                raise util.Abort(_('no patches applied'))
            revs = cmdutil.revrange(repo, opts['rev'])
            if len(revs) > 1 and revs[0] > revs[1]:
                revs.reverse()
            for rev in revs:
                if appliedbase >= len(self.applied):
                    raise util.Abort(_("revision %d is not managed") % rev)

                # each rev must equal the next entry from the bottom of
                # the applied stack - no gaps allowed
                base = revlog.bin(self.applied[appliedbase].rev)
                node = repo.changelog.node(rev)
                if node != base:
                    raise util.Abort(_("cannot delete revision %d above "
                                       "applied patches") % rev)
                realpatches.append(self.applied[appliedbase].name)
                appliedbase += 1

        if not opts.get('keep'):
            r = self.qrepo()
            if r:
                # the patches directory is itself versioned: remove
                # the files through that repository
                r.remove(realpatches, True)
            else:
                for p in realpatches:
                    os.unlink(self.join(p))

        if appliedbase:
            del self.applied[:appliedbase]
            self.applied_dirty = 1
        indices = [self.find_series(p) for p in realpatches]
        indices.sort()
        # delete from the end so earlier indices stay valid
        for i in indices[-1::-1]:
            del self.full_series[i]
        self.parse_series()
        self.series_dirty = 1
584
584
def check_toppatch(self, repo):
    """Verify the working directory parent is the top applied patch.

    Returns the node of the topmost applied patch, or None when no
    patches are applied.  Aborts if the working dir has been updated
    away from qtip.
    """
    if not self.applied:
        return None
    top = revlog.bin(self.applied[-1].rev)
    if top not in repo.dirstate.parents():
        raise util.Abort(_("working directory revision is not qtip"))
    return top
def check_localchanges(self, repo, force=False, refresh=True):
    """Abort (unless force) when the working directory is dirty.

    Returns the (modified, added, removed, deleted) lists from
    repo.status().  The refresh flag only selects the abort message.
    """
    m, a, r, d = repo.status()[:4]
    if (m or a or r or d) and not force:
        if refresh:
            raise util.Abort(_("local changes found, refresh first"))
        raise util.Abort(_("local changes found"))
    return m, a, r, d
602
602
# file names mq itself uses inside the patches directory
_reserved = ('series', 'status', 'guards')
def check_reserved_name(self, name):
    """Abort when name clashes with a reserved or internal file name."""
    clashes = (name in self._reserved
               or name.startswith('.hg')
               or name.startswith('.mq'))
    if clashes:
        raise util.Abort(_('"%s" cannot be used as the name of a patch')
                         % name)
609
609
def new(self, repo, patch, *pats, **opts):
    """Create a new patch named *patch* on top of the applied stack.

    Recognized opts: msg, force, user, date, include, exclude, git.
    Current local changes (or only the files matched by pats /
    include / exclude) are folded into the new patch via refresh.
    Aborts if the name is reserved or the patch file already exists.
    """
    msg = opts.get('msg')
    force = opts.get('force')
    user = opts.get('user')
    date = opts.get('date')
    self.check_reserved_name(patch)
    if os.path.exists(self.join(patch)):
        raise util.Abort(_('patch "%s" already exists') % patch)
    if opts.get('include') or opts.get('exclude') or pats:
        fns, match, anypats = cmdutil.matchpats(repo, pats, opts)
        m, a, r, d = repo.status(files=fns, match=match)[:4]
    else:
        m, a, r, d = self.check_localchanges(repo, force)
        fns, match, anypats = cmdutil.matchpats(repo, m + a + r)
    commitfiles = m + a + r
    self.check_toppatch(repo)
    wlock = repo.wlock()
    try:
        insert = self.full_series_end()
        # default commit message when none was supplied
        commitmsg = msg or ("[mq]: %s" % patch)
        n = repo.commit(commitfiles, commitmsg, user, date, match=match,
                        force=True)
        if n is None:
            raise util.Abort(_("repo commit failed"))
        self.full_series[insert:insert] = [patch]
        self.applied.append(statusentry(revlog.hex(n), patch))
        self.parse_series()
        self.series_dirty = 1
        self.applied_dirty = 1
        p = self.opener(patch, "w")
        if date:
            # an explicit date forces the full changeset-patch header
            p.write("# HG changeset patch\n")
            if user:
                p.write("# User " + user + "\n")
            p.write("# Date " + date + "\n")
            p.write("\n")
        elif user:
            p.write("From: " + user + "\n")
            p.write("\n")
        if msg:
            msg = msg + "\n"
            p.write(msg)
        p.close()
        # drop our lock reference before touching the queue repo
        wlock = None
        r = self.qrepo()
        if r:
            r.add([patch])
        if commitfiles:
            self.refresh(repo, short=True, git=opts.get('git'))
        self.removeundo(repo)
    finally:
        del wlock
660
660
def strip(self, repo, rev, update=True, backup="all"):
    """Strip rev and its descendants from the repository.

    When update is True the working directory is first moved to rev's
    queue parent (aborting on local changes).  backup is passed through
    to repair.strip.
    """
    wlock = lock = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()

        if update:
            self.check_localchanges(repo, refresh=False)
            urev = self.qparents(repo, rev)
            hg.clean(repo, urev)
            repo.dirstate.write()

        self.removeundo(repo)
        repair.strip(self.ui, repo, rev, backup)
        # strip may have unbundled a set of backed up revisions after
        # the actual strip
        self.removeundo(repo)
    finally:
        del lock, wlock
680
680
def isapplied(self, patch):
    """returns (index, rev, patch)"""
    # scan the applied stack for a matching patch name
    for idx, entry in enumerate(self.applied):
        if entry.name == patch:
            return (idx, entry.rev, entry.name)
    return None
688
688
# if the exact patch name does not exist, we try a few
# variations. If strict is passed, we try only #1
#
# 1) a number to indicate an offset in the series file
# 2) a unique substring of the patch name was given
# 3) patchname[-+]num to indicate an offset in the series file
def lookup(self, patch, strict=False):
    """Resolve a user-supplied patch identifier to a series entry.

    Accepts an exact name, a series index, a unique substring,
    'qtip'/'qbase', or 'name-n'/'name+n' offsets (unless strict).
    Returns None when patch is None; raises util.Abort when nothing
    matches.
    """
    patch = patch and str(patch)

    def partial_name(s):
        # exact name wins outright
        if s in self.series:
            return s
        # otherwise a substring match, which must be unique
        matches = [x for x in self.series if s in x]
        if len(matches) > 1:
            self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
            for m in matches:
                self.ui.warn(' %s\n' % m)
            return None
        if matches:
            return matches[0]
        # symbolic names only make sense with patches applied
        if len(self.series) > 0 and len(self.applied) > 0:
            if s == 'qtip':
                return self.series[self.series_end(True)-1]
            if s == 'qbase':
                return self.series[0]
        return None

    if patch is None:
        return None

    # we don't want to return a partial match until we make
    # sure the file name passed in does not exist (checked below)
    res = partial_name(patch)
    if res and res == patch:
        return res

    if not os.path.isfile(self.join(patch)):
        try:
            sno = int(patch)
        except (ValueError, OverflowError):
            pass
        else:
            if sno < len(self.series):
                return self.series[sno]
        if not strict:
            # return any partial match made above
            if res:
                return res
            minus = patch.rfind('-')
            if minus >= 0:
                res = partial_name(patch[:minus])
                if res:
                    i = self.series.index(res)
                    try:
                        off = int(patch[minus+1:] or 1)
                    except (ValueError, OverflowError):
                        pass
                    else:
                        # bounds-check both ends so a pathological
                        # negative offset cannot raise IndexError
                        if 0 <= i - off < len(self.series):
                            return self.series[i - off]
            plus = patch.rfind('+')
            if plus >= 0:
                res = partial_name(patch[:plus])
                if res:
                    i = self.series.index(res)
                    try:
                        off = int(patch[plus+1:] or 1)
                    except (ValueError, OverflowError):
                        pass
                    else:
                        # likewise guard against a negative offset
                        # wrapping around via negative indexing
                        if 0 <= i + off < len(self.series):
                            return self.series[i + off]
    raise util.Abort(_("patch %s not in series") % patch)
761
761
def push(self, repo, patch=None, force=False, list=False,
         mergeq=None):
    """Apply unapplied patches up to and including *patch*.

    With no patch, applies just the next unapplied one.  Returns the
    apply status (non-zero on failure); on an unexpected exception the
    working directory is cleaned up before re-raising.
    """
    wlock = repo.wlock()
    try:
        # resolve substrings / 'qtip' / 'name+n' forms to a real name
        patch = self.lookup(patch)
        # Suppose our series file is: A B C and the current 'top'
        # patch is B. qpush C should be performed (moving forward)
        # qpush B is a NOP (no change) qpush A is an error (can't
        # go backwards with qpush)
        if patch:
            info = self.isapplied(patch)
            if info:
                if info[0] < len(self.applied) - 1:
                    raise util.Abort(
                        _("cannot push to a previous patch: %s") % patch)
                if info[0] < len(self.series) - 1:
                    self.ui.warn(
                        _('qpush: %s is already at the top\n') % patch)
                else:
                    self.ui.warn(_('all patches are currently applied\n'))
                return

        # Following the above example, starting at 'top' of B:
        # qpush should be performed (pushes C), but a subsequent
        # qpush without an argument is an error (nothing to
        # apply). This allows a loop of "...while hg qpush..." to
        # work as it detects an error when done
        if self.series_end() == len(self.series):
            self.ui.warn(_('patch series already fully applied\n'))
            return 1
        if not force:
            self.check_localchanges(repo)

        self.applied_dirty = 1;
        start = self.series_end()
        if start > 0:
            self.check_toppatch(repo)
        if not patch:
            patch = self.series[start]
            end = start + 1
        else:
            end = self.series.index(patch, start) + 1
        # the slice of series entries that will actually be applied
        s = self.series[start:end]
        all_files = {}
        try:
            if mergeq:
                ret = self.mergepatch(repo, mergeq, s)
            else:
                ret = self.apply(repo, s, list, all_files=all_files)
        except:
            # NOTE: bare except is deliberate here -- even on
            # KeyboardInterrupt the working dir must be restored
            # before the exception propagates
            self.ui.warn(_('cleaning up working directory...'))
            node = repo.dirstate.parents()[0]
            hg.revert(repo, node, None)
            unknown = repo.status()[4]
            # only remove unknown files that we know we touched or
            # created while patching
            for f in unknown:
                if f in all_files:
                    util.unlink(repo.wjoin(f))
            self.ui.warn(_('done\n'))
            raise
        top = self.applied[-1].name
        if ret[0]:
            self.ui.write(
                "Errors during apply, please fix and refresh %s\n" % top)
        else:
            self.ui.write("Now at: %s\n" % top)
        return ret[0]
    finally:
        del wlock
832
832
def pop(self, repo, patch=None, force=False, update=True, all=False):
    """Unapply patches until *patch* (if given) is the topmost one.

    Without a patch, pops the top patch, or every patch when all=True.
    When update is True the working directory is downdated to the new
    queue parent with a simplified in-place update.
    """
    def getfile(f, rev, flags):
        # restore f's content as of rev into the working directory
        t = repo.file(f).read(rev)
        repo.wwrite(f, t, flags)

    wlock = repo.wlock()
    try:
        if patch:
            # index, rev, patch
            info = self.isapplied(patch)
            if not info:
                patch = self.lookup(patch)
                info = self.isapplied(patch)
                if not info:
                    raise util.Abort(_("patch %s is not applied") % patch)

        if len(self.applied) == 0:
            # Allow qpop -a to work repeatedly,
            # but not qpop without an argument
            self.ui.warn(_("no patches applied\n"))
            return not all

        if not update:
            # if a dirstate parent is itself a managed patch revision,
            # popping without updating would corrupt the checkout
            parents = repo.dirstate.parents()
            rr = [ revlog.bin(x.rev) for x in self.applied ]
            for p in parents:
                if p in rr:
                    self.ui.warn("qpop: forcing dirstate update\n")
                    update = True

        if not force and update:
            self.check_localchanges(repo)

        self.applied_dirty = 1;
        end = len(self.applied)
        if not patch:
            if all:
                popi = 0
            else:
                popi = len(self.applied) - 1
        else:
            # pop everything above the named patch, leaving it on top
            popi = info[0] + 1
            if popi >= end:
                self.ui.warn("qpop: %s is already at the top\n" % patch)
                return
        info = [ popi ] + [self.applied[popi].rev, self.applied[popi].name]

        start = info[0]
        rev = revlog.bin(info[1])

        if update:
            top = self.check_toppatch(repo)

        # refuse to strip anything that grew descendants outside mq
        if repo.changelog.heads(rev) != [revlog.bin(self.applied[-1].rev)]:
            raise util.Abort("popping would remove a revision not "
                             "managed by this patch queue")

        # we know there are no local changes, so we can make a simplified
        # form of hg.update.
        if update:
            qp = self.qparents(repo, rev)
            changes = repo.changelog.read(qp)
            mmap = repo.manifest.read(changes[0])
            m, a, r, d, u = repo.status(qp, top)[:5]
            if d:
                raise util.Abort("deletions found between repo revs")
            for f in m:
                getfile(f, mmap[f], mmap.flags(f))
            for f in r:
                getfile(f, mmap[f], mmap.flags(f))
            for f in m + r:
                repo.dirstate.normal(f)
            for f in a:
                # files added by the popped patches simply go away;
                # a file already missing is fine
                try:
                    os.unlink(repo.wjoin(f))
                except OSError, e:
                    if e.errno != errno.ENOENT:
                        raise
                # prune now-empty parent directories, best effort
                try: os.removedirs(os.path.dirname(repo.wjoin(f)))
                except: pass
                repo.dirstate.forget(f)
            repo.dirstate.setparents(qp, revlog.nullid)
        del self.applied[start:end]
        self.strip(repo, rev, update=False, backup='strip')
        if len(self.applied):
            self.ui.write("Now at: %s\n" % self.applied[-1].name)
        else:
            self.ui.write("Patch queue now empty\n")
    finally:
        del wlock
923
923
def diff(self, repo, pats, opts):
    """Print the diff of the top applied patch plus local changes."""
    base = self.check_toppatch(repo)
    if not base:
        self.ui.write("No patches applied\n")
        return
    parent = self.qparents(repo, base)
    if opts.get('git'):
        self.diffopts().git = True
    self.printdiff(repo, parent, files=pats, opts=opts)
933
933
934 def refresh(self, repo, pats=None, **opts):
934 def refresh(self, repo, pats=None, **opts):
935 if len(self.applied) == 0:
935 if len(self.applied) == 0:
936 self.ui.write("No patches applied\n")
936 self.ui.write("No patches applied\n")
937 return 1
937 return 1
938 wlock = repo.wlock()
938 wlock = repo.wlock()
939 try:
939 try:
940 self.check_toppatch(repo)
940 self.check_toppatch(repo)
941 (top, patchfn) = (self.applied[-1].rev, self.applied[-1].name)
941 (top, patchfn) = (self.applied[-1].rev, self.applied[-1].name)
942 top = revlog.bin(top)
942 top = revlog.bin(top)
943 if repo.changelog.heads(top) != [top]:
943 if repo.changelog.heads(top) != [top]:
944 raise util.Abort("cannot refresh a revision with children")
944 raise util.Abort("cannot refresh a revision with children")
945 cparents = repo.changelog.parents(top)
945 cparents = repo.changelog.parents(top)
946 patchparent = self.qparents(repo, top)
946 patchparent = self.qparents(repo, top)
947 message, comments, user, date, patchfound = self.readheaders(patchfn)
947 message, comments, user, date, patchfound = self.readheaders(patchfn)
948
948
949 patchf = self.opener(patchfn, 'r+')
949 patchf = self.opener(patchfn, 'r+')
950
950
951 # if the patch was a git patch, refresh it as a git patch
951 # if the patch was a git patch, refresh it as a git patch
952 for line in patchf:
952 for line in patchf:
953 if line.startswith('diff --git'):
953 if line.startswith('diff --git'):
954 self.diffopts().git = True
954 self.diffopts().git = True
955 break
955 break
956
956
957 msg = opts.get('msg', '').rstrip()
957 msg = opts.get('msg', '').rstrip()
958 if msg and comments:
958 if msg and comments:
959 # Remove existing message, keeping the rest of the comments
959 # Remove existing message, keeping the rest of the comments
960 # fields.
960 # fields.
961 # If comments contains 'subject: ', message will prepend
961 # If comments contains 'subject: ', message will prepend
962 # the field and a blank line.
962 # the field and a blank line.
963 if message:
963 if message:
964 subj = 'subject: ' + message[0].lower()
964 subj = 'subject: ' + message[0].lower()
965 for i in xrange(len(comments)):
965 for i in xrange(len(comments)):
966 if subj == comments[i].lower():
966 if subj == comments[i].lower():
967 del comments[i]
967 del comments[i]
968 message = message[2:]
968 message = message[2:]
969 break
969 break
970 ci = 0
970 ci = 0
971 for mi in xrange(len(message)):
971 for mi in xrange(len(message)):
972 while message[mi] != comments[ci]:
972 while message[mi] != comments[ci]:
973 ci += 1
973 ci += 1
974 del comments[ci]
974 del comments[ci]
975
975
976 def setheaderfield(comments, prefixes, new):
976 def setheaderfield(comments, prefixes, new):
977 # Update all references to a field in the patch header.
977 # Update all references to a field in the patch header.
978 # If none found, add it email style.
978 # If none found, add it email style.
979 res = False
979 res = False
980 for prefix in prefixes:
980 for prefix in prefixes:
981 for i in xrange(len(comments)):
981 for i in xrange(len(comments)):
982 if comments[i].startswith(prefix):
982 if comments[i].startswith(prefix):
983 comments[i] = prefix + new
983 comments[i] = prefix + new
984 res = True
984 res = True
985 break
985 break
986 return res
986 return res
987
987
988 newuser = opts.get('user')
988 newuser = opts.get('user')
989 if newuser:
989 if newuser:
990 if not setheaderfield(comments, ['From: ', '# User '], newuser):
990 if not setheaderfield(comments, ['From: ', '# User '], newuser):
991 try:
991 try:
992 patchheaderat = comments.index('# HG changeset patch')
992 patchheaderat = comments.index('# HG changeset patch')
993 comments.insert(patchheaderat + 1,'# User ' + newuser)
993 comments.insert(patchheaderat + 1,'# User ' + newuser)
994 except ValueError:
994 except ValueError:
995 comments = ['From: ' + newuser, ''] + comments
995 comments = ['From: ' + newuser, ''] + comments
996 user = newuser
996 user = newuser
997
997
998 newdate = opts.get('date')
998 newdate = opts.get('date')
999 if newdate:
999 if newdate:
1000 if setheaderfield(comments, ['# Date '], newdate):
1000 if setheaderfield(comments, ['# Date '], newdate):
1001 date = newdate
1001 date = newdate
1002
1002
1003 if msg:
1003 if msg:
1004 comments.append(msg)
1004 comments.append(msg)
1005
1005
1006 patchf.seek(0)
1006 patchf.seek(0)
1007 patchf.truncate()
1007 patchf.truncate()
1008
1008
1009 if comments:
1009 if comments:
1010 comments = "\n".join(comments) + '\n\n'
1010 comments = "\n".join(comments) + '\n\n'
1011 patchf.write(comments)
1011 patchf.write(comments)
1012
1012
1013 if opts.get('git'):
1013 if opts.get('git'):
1014 self.diffopts().git = True
1014 self.diffopts().git = True
1015 fns, matchfn, anypats = cmdutil.matchpats(repo, pats, opts)
1015 fns, matchfn, anypats = cmdutil.matchpats(repo, pats, opts)
1016 tip = repo.changelog.tip()
1016 tip = repo.changelog.tip()
1017 if top == tip:
1017 if top == tip:
1018 # if the top of our patch queue is also the tip, there is an
1018 # if the top of our patch queue is also the tip, there is an
1019 # optimization here. We update the dirstate in place and strip
1019 # optimization here. We update the dirstate in place and strip
1020 # off the tip commit. Then just commit the current directory
1020 # off the tip commit. Then just commit the current directory
1021 # tree. We can also send repo.commit the list of files
1021 # tree. We can also send repo.commit the list of files
1022 # changed to speed up the diff
1022 # changed to speed up the diff
1023 #
1023 #
1024 # in short mode, we only diff the files included in the
1024 # in short mode, we only diff the files included in the
1025 # patch already
1025 # patch already
1026 #
1026 #
1027 # this should really read:
1027 # this should really read:
1028 # mm, dd, aa, aa2, uu = repo.status(tip, patchparent)[:5]
1028 # mm, dd, aa, aa2, uu = repo.status(tip, patchparent)[:5]
1029 # but we do it backwards to take advantage of manifest/chlog
1029 # but we do it backwards to take advantage of manifest/chlog
1030 # caching against the next repo.status call
1030 # caching against the next repo.status call
1031 #
1031 #
1032 mm, aa, dd, aa2, uu = repo.status(patchparent, tip)[:5]
1032 mm, aa, dd, aa2, uu = repo.status(patchparent, tip)[:5]
1033 changes = repo.changelog.read(tip)
1033 changes = repo.changelog.read(tip)
1034 man = repo.manifest.read(changes[0])
1034 man = repo.manifest.read(changes[0])
1035 aaa = aa[:]
1035 aaa = aa[:]
1036 if opts.get('short'):
1036 if opts.get('short'):
1037 filelist = mm + aa + dd
1037 filelist = mm + aa + dd
1038 match = dict.fromkeys(filelist).__contains__
1038 match = dict.fromkeys(filelist).__contains__
1039 else:
1039 else:
1040 filelist = None
1040 filelist = None
1041 match = util.always
1041 match = util.always
1042 m, a, r, d, u = repo.status(files=filelist, match=match)[:5]
1042 m, a, r, d, u = repo.status(files=filelist, match=match)[:5]
1043
1043
1044 # we might end up with files that were added between
1044 # we might end up with files that were added between
1045 # tip and the dirstate parent, but then changed in the
1045 # tip and the dirstate parent, but then changed in the
1046 # local dirstate. in this case, we want them to only
1046 # local dirstate. in this case, we want them to only
1047 # show up in the added section
1047 # show up in the added section
1048 for x in m:
1048 for x in m:
1049 if x not in aa:
1049 if x not in aa:
1050 mm.append(x)
1050 mm.append(x)
1051 # we might end up with files added by the local dirstate that
1051 # we might end up with files added by the local dirstate that
1052 # were deleted by the patch. In this case, they should only
1052 # were deleted by the patch. In this case, they should only
1053 # show up in the changed section.
1053 # show up in the changed section.
1054 for x in a:
1054 for x in a:
1055 if x in dd:
1055 if x in dd:
1056 del dd[dd.index(x)]
1056 del dd[dd.index(x)]
1057 mm.append(x)
1057 mm.append(x)
1058 else:
1058 else:
1059 aa.append(x)
1059 aa.append(x)
1060 # make sure any files deleted in the local dirstate
1060 # make sure any files deleted in the local dirstate
1061 # are not in the add or change column of the patch
1061 # are not in the add or change column of the patch
1062 forget = []
1062 forget = []
1063 for x in d + r:
1063 for x in d + r:
1064 if x in aa:
1064 if x in aa:
1065 del aa[aa.index(x)]
1065 del aa[aa.index(x)]
1066 forget.append(x)
1066 forget.append(x)
1067 continue
1067 continue
1068 elif x in mm:
1068 elif x in mm:
1069 del mm[mm.index(x)]
1069 del mm[mm.index(x)]
1070 dd.append(x)
1070 dd.append(x)
1071
1071
1072 m = util.unique(mm)
1072 m = util.unique(mm)
1073 r = util.unique(dd)
1073 r = util.unique(dd)
1074 a = util.unique(aa)
1074 a = util.unique(aa)
1075 c = [filter(matchfn, l) for l in (m, a, r, [], u)]
1075 c = [filter(matchfn, l) for l in (m, a, r, [], u)]
1076 filelist = util.unique(c[0] + c[1] + c[2])
1076 filelist = util.unique(c[0] + c[1] + c[2])
1077 patch.diff(repo, patchparent, files=filelist, match=matchfn,
1077 patch.diff(repo, patchparent, files=filelist, match=matchfn,
1078 fp=patchf, changes=c, opts=self.diffopts())
1078 fp=patchf, changes=c, opts=self.diffopts())
1079 patchf.close()
1079 patchf.close()
1080
1080
1081 repo.dirstate.setparents(*cparents)
1081 repo.dirstate.setparents(*cparents)
1082 copies = {}
1082 copies = {}
1083 for dst in a:
1083 for dst in a:
1084 src = repo.dirstate.copied(dst)
1084 src = repo.dirstate.copied(dst)
1085 if src is not None:
1085 if src is not None:
1086 copies.setdefault(src, []).append(dst)
1086 copies.setdefault(src, []).append(dst)
1087 repo.dirstate.add(dst)
1087 repo.dirstate.add(dst)
1088 # remember the copies between patchparent and tip
1088 # remember the copies between patchparent and tip
1089 # this may be slow, so don't do it if we're not tracking copies
1089 # this may be slow, so don't do it if we're not tracking copies
1090 if self.diffopts().git:
1090 if self.diffopts().git:
1091 for dst in aaa:
1091 for dst in aaa:
1092 f = repo.file(dst)
1092 f = repo.file(dst)
1093 src = f.renamed(man[dst])
1093 src = f.renamed(man[dst])
1094 if src:
1094 if src:
1095 copies[src[0]] = copies.get(dst, [])
1095 copies[src[0]] = copies.get(dst, [])
1096 if dst in a:
1096 if dst in a:
1097 copies[src[0]].append(dst)
1097 copies[src[0]].append(dst)
1098 # we can't copy a file created by the patch itself
1098 # we can't copy a file created by the patch itself
1099 if dst in copies:
1099 if dst in copies:
1100 del copies[dst]
1100 del copies[dst]
1101 for src, dsts in copies.iteritems():
1101 for src, dsts in copies.iteritems():
1102 for dst in dsts:
1102 for dst in dsts:
1103 repo.dirstate.copy(src, dst)
1103 repo.dirstate.copy(src, dst)
1104 for f in r:
1104 for f in r:
1105 repo.dirstate.remove(f)
1105 repo.dirstate.remove(f)
1106 # if the patch excludes a modified file, mark that
1106 # if the patch excludes a modified file, mark that
1107 # file with mtime=0 so status can see it.
1107 # file with mtime=0 so status can see it.
1108 mm = []
1108 mm = []
1109 for i in xrange(len(m)-1, -1, -1):
1109 for i in xrange(len(m)-1, -1, -1):
1110 if not matchfn(m[i]):
1110 if not matchfn(m[i]):
1111 mm.append(m[i])
1111 mm.append(m[i])
1112 del m[i]
1112 del m[i]
1113 for f in m:
1113 for f in m:
1114 repo.dirstate.normal(f)
1114 repo.dirstate.normal(f)
1115 for f in mm:
1115 for f in mm:
1116 repo.dirstate.normallookup(f)
1116 repo.dirstate.normallookup(f)
1117 for f in forget:
1117 for f in forget:
1118 repo.dirstate.forget(f)
1118 repo.dirstate.forget(f)
1119
1119
1120 if not msg:
1120 if not msg:
1121 if not message:
1121 if not message:
1122 message = "[mq]: %s\n" % patchfn
1122 message = "[mq]: %s\n" % patchfn
1123 else:
1123 else:
1124 message = "\n".join(message)
1124 message = "\n".join(message)
1125 else:
1125 else:
1126 message = msg
1126 message = msg
1127
1127
1128 if not user:
1128 if not user:
1129 user = changes[1]
1129 user = changes[1]
1130
1130
1131 self.applied.pop()
1131 self.applied.pop()
1132 self.applied_dirty = 1
1132 self.applied_dirty = 1
1133 self.strip(repo, top, update=False,
1133 self.strip(repo, top, update=False,
1134 backup='strip')
1134 backup='strip')
1135 n = repo.commit(filelist, message, user, date, match=matchfn,
1135 n = repo.commit(filelist, message, user, date, match=matchfn,
1136 force=1)
1136 force=1)
1137 self.applied.append(statusentry(revlog.hex(n), patchfn))
1137 self.applied.append(statusentry(revlog.hex(n), patchfn))
1138 self.removeundo(repo)
1138 self.removeundo(repo)
1139 else:
1139 else:
1140 self.printdiff(repo, patchparent, fp=patchf)
1140 self.printdiff(repo, patchparent, fp=patchf)
1141 patchf.close()
1141 patchf.close()
1142 added = repo.status()[1]
1142 added = repo.status()[1]
1143 for a in added:
1143 for a in added:
1144 f = repo.wjoin(a)
1144 f = repo.wjoin(a)
1145 try:
1145 try:
1146 os.unlink(f)
1146 os.unlink(f)
1147 except OSError, e:
1147 except OSError, e:
1148 if e.errno != errno.ENOENT:
1148 if e.errno != errno.ENOENT:
1149 raise
1149 raise
1150 try: os.removedirs(os.path.dirname(f))
1150 try: os.removedirs(os.path.dirname(f))
1151 except: pass
1151 except: pass
1152 # forget the file copies in the dirstate
1152 # forget the file copies in the dirstate
1153 # push should readd the files later on
1153 # push should readd the files later on
1154 repo.dirstate.forget(a)
1154 repo.dirstate.forget(a)
1155 self.pop(repo, force=True)
1155 self.pop(repo, force=True)
1156 self.push(repo, force=True)
1156 self.push(repo, force=True)
1157 finally:
1157 finally:
1158 del wlock
1158 del wlock
1159
1159
def init(self, repo, create=False):
    """Create the patch queue directory (.hg/patches).

    If create is False, abort when the directory already exists;
    with create=True an existing directory is tolerated and a
    versioned queue repository is initialized inside it.
    """
    # Refuse to clobber an existing queue unless -c/--create-repo
    # explicitly asked for a repository (which may already exist).
    if not create and os.path.isdir(self.path):
        raise util.Abort(_("patch queue directory already exists"))
    try:
        os.mkdir(self.path)
    except OSError, inst:
        # EEXIST is acceptable only when we were asked to create a repo.
        if inst.errno != errno.EEXIST or not create:
            raise
    if create:
        return self.qrepo(create=True)
1170
1170
def unapplied(self, repo, patch=None):
    """Return [(index, name)] of pushable patches not yet applied.

    Without *patch*, start at the first unapplied series entry;
    otherwise start just after *patch*.  Guarded entries are skipped
    (explain_pushable reports why, in verbose mode).
    """
    if patch and patch not in self.series:
        raise util.Abort(_("patch %s is not in series file") % patch)
    if patch:
        first = self.series.index(patch) + 1
    else:
        first = self.series_end()
    result = []
    for idx in xrange(first, len(self.series)):
        ok, reason = self.pushable(idx)
        if ok:
            result.append((idx, self.series[idx]))
        self.explain_pushable(idx)
    return result
1185
1185
def qseries(self, repo, missing=None, start=0, length=None, status=None,
            summary=False):
    """Print the patch series.

    By default, list series entries [start, start+length) with an
    optional status letter; with *missing* set, instead list patch
    files present in the queue directory but absent from the series
    file.  *status* restricts non-verbose output to entries with that
    letter; *summary* appends the first line of each patch header.
    """
    def displayname(patchname):
        # With -s/--summary, append the first line of the patch message.
        if summary:
            msg = self.readheaders(patchname)[0]
            msg = msg and ': ' + msg[0] or ': '
        else:
            msg = ''
        return '%s%s' % (patchname, msg)

    # Applied patch names, keyed for O(1) membership tests.
    applied = dict.fromkeys([p.name for p in self.applied])
    if length is None:
        length = len(self.series) - start
    if not missing:
        for i in xrange(start, start+length):
            patch = self.series[i]
            # A = applied, U = unapplied (pushable), G = guarded.
            if patch in applied:
                stat = 'A'
            elif self.pushable(i)[0]:
                stat = 'U'
            else:
                stat = 'G'
            pfx = ''
            if self.ui.verbose:
                pfx = '%d %s ' % (i, stat)
            elif status and status != stat:
                # Non-verbose with a status filter: skip other entries.
                continue
            self.ui.write('%s%s\n' % (pfx, displayname(patch)))
    else:
        # Walk the queue directory looking for patch files that are
        # not in the series file, skipping mq's own control files and
        # hidden files.
        msng_list = []
        for root, dirs, files in os.walk(self.path):
            d = root[len(self.path) + 1:]
            for f in files:
                fl = os.path.join(d, f)
                if (fl not in self.series and
                    fl not in (self.status_path, self.series_path,
                               self.guards_path)
                    and not fl.startswith('.')):
                    msng_list.append(fl)
        msng_list.sort()
        for x in msng_list:
            # Missing entries are flagged 'D' in verbose mode.
            pfx = self.ui.verbose and ('D ') or ''
            self.ui.write("%s%s\n" % (pfx, displayname(x)))
1229
1229
def issaveline(self, l):
    """Return True when status entry *l* is the qsave sentinel."""
    # qsave records its state under this reserved pseudo-patch name.
    sentinel = '.hg.patches.save.line'
    if l.name == sentinel:
        return True
1233
1233
def qrepo(self, create=False):
    """Return the versioned patch repository, or None if there is none.

    With create=True, initialize a new repository in the queue
    directory.
    """
    # Without an existing .hg inside the queue dir (and no request to
    # create one) the queue is unversioned.
    if not create and not os.path.isdir(self.join(".hg")):
        return None
    return hg.repository(self.ui, path=self.path, create=create)
1237
1237
def restore(self, repo, rev, delete=None, qupdate=None):
    """Rebuild mq state from the qsave changeset *rev*.

    The save changeset's description encodes the series, the applied
    patches and (optionally) the queue repository's dirstate parents.
    With *delete*, strip the save changeset afterwards; with
    *qupdate*, also update the versioned queue repository.
    Returns 1 on failure.
    """
    c = repo.changelog.read(rev)
    desc = c[4].strip()
    lines = desc.splitlines()
    i = 0
    datastart = None
    series = []
    applied = []
    qpp = None
    for i in xrange(0, len(lines)):
        if lines[i] == 'Patch Data:':
            # Everything after this marker describes the patch stacks.
            datastart = i + 1
        elif lines[i].startswith('Dirstate:'):
            # Saved parents of the queue repository's working directory.
            l = lines[i].rstrip()
            l = l[10:].split(' ')
            qpp = [ hg.bin(x) for x in l ]
        elif datastart != None:
            # Entries with a revision are applied patches; bare names
            # are unapplied series entries.
            l = lines[i].rstrip()
            se = statusentry(l)
            file_ = se.name
            if se.rev:
                applied.append(se)
            else:
                series.append(file_)
    if datastart == None:
        self.ui.warn("No saved patch data found\n")
        return 1
    self.ui.warn("restoring status: %s\n" % lines[0])
    self.full_series = series
    self.applied = applied
    self.parse_series()
    self.series_dirty = 1
    self.applied_dirty = 1
    heads = repo.changelog.heads()
    if delete:
        # Only strip the save changeset when nothing was committed on
        # top of it.
        if rev not in heads:
            self.ui.warn("save entry has children, leaving it alone\n")
        else:
            self.ui.warn("removing save entry %s\n" % hg.short(rev))
            pp = repo.dirstate.parents()
            # Update the working dir only if it sits on the stripped rev.
            if rev in pp:
                update = True
            else:
                update = False
            self.strip(repo, rev, update=update, backup='strip')
    if qpp:
        self.ui.warn("saved queue repository parents: %s %s\n" %
                     (hg.short(qpp[0]), hg.short(qpp[1])))
        if qupdate:
            self.ui.status(_("queue directory updating\n"))
            r = self.qrepo()
            if not r:
                self.ui.warn("Unable to load queue repository\n")
                return 1
            hg.clean(r, qpp[0])
1293
1293
def save(self, repo, msg=None):
    """Commit the current mq state (series + applied) as a save changeset.

    Returns 1 when there is nothing to save, the state is already
    saved, or the commit fails.  restore() reverses the operation.
    """
    if len(self.applied) == 0:
        self.ui.warn("save: no patches applied, exiting\n")
        return 1
    if self.issaveline(self.applied[-1]):
        self.ui.warn("status is already saved\n")
        return 1

    # Unapplied series entries are stored with a ':' prefix (no rev).
    ar = [ ':' + x for x in self.full_series ]
    if not msg:
        msg = "hg patches saved state"
    else:
        msg = "hg patches: " + msg.rstrip('\r\n')
    r = self.qrepo()
    if r:
        # Record the queue repository's dirstate parents so restore()
        # can put the queue repo back as well.
        pp = r.dirstate.parents()
        msg += "\nDirstate: %s %s" % (hg.hex(pp[0]), hg.hex(pp[1]))
    msg += "\n\nPatch Data:\n"
    text = msg + "\n".join([str(x) for x in self.applied]) + '\n' + (ar and
               "\n".join(ar) + '\n' or "")
    n = repo.commit(None, text, user=None, force=1)
    if not n:
        self.ui.warn("repo commit failed\n")
        return 1
    # The save point itself is tracked as a sentinel applied entry.
    self.applied.append(statusentry(revlog.hex(n),'.hg.patches.save.line'))
    self.applied_dirty = 1
    self.removeundo(repo)
1321
1321
def full_series_end(self):
    """Return the index in full_series just past the last applied patch.

    Returns 0 when no patches are applied, and the series length when
    the last applied patch cannot be located in the series file.
    """
    if not self.applied:
        return 0
    pos = self.find_series(self.applied[-1].name)
    if pos is None:
        return len(self.full_series)
    return pos + 1
1330
1330
def series_end(self, all_patches=False):
    """If all_patches is False, return the index of the next pushable patch
    in the series, or the series length. If all_patches is True, return the
    index of the first patch past the last applied one.
    """
    def skip_guarded(idx):
        # With all_patches, guarded entries are not skipped.
        if all_patches:
            return idx
        while idx < len(self.series):
            ok, reason = self.pushable(idx)
            if ok:
                break
            self.explain_pushable(idx)
            idx += 1
        return idx

    if not self.applied:
        return skip_guarded(0)
    last = self.applied[-1].name
    try:
        base = self.series.index(last)
    except ValueError:
        # Last applied patch is no longer in the series file.
        return 0
    return skip_guarded(base + 1)
1356
1356
def appliedname(self, index):
    """Return the display name of the index-th applied patch.

    In verbose mode the name is prefixed with its series index.
    """
    name = self.applied[index].name
    if self.ui.verbose:
        return str(self.series.index(name)) + " " + name
    return name
1364
1364
def qimport(self, repo, files, patchname=None, rev=None, existing=None,
            force=None, git=False):
    """Import patches into the queue.

    *files* are patch file names ('-' reads stdin); with *rev*, import
    existing revisions instead, placing them under mq control.
    *patchname* forces the name of a single imported patch; *existing*
    registers a file already present in the patch directory; *force*
    allows overwriting an existing patch of the same name; *git*
    selects git-style diffs for imported revisions.
    """
    def checkseries(patchname):
        if patchname in self.series:
            raise util.Abort(_('patch %s is already in the series file')
                             % patchname)
    def checkfile(patchname):
        if not force and os.path.exists(self.join(patchname)):
            raise util.Abort(_('patch "%s" already exists')
                             % patchname)

    if rev:
        if files:
            raise util.Abort(_('option "-r" not valid when importing '
                               'files'))
        rev = cmdutil.revrange(repo, rev)
        # Process revisions from tip-most to oldest.
        rev.sort(lambda x, y: cmp(y, x))
    if (len(files) > 1 or len(rev) > 1) and patchname:
        raise util.Abort(_('option "-n" not valid when importing multiple '
                           'patches'))
    i = 0
    added = []
    if rev:
        # If mq patches are applied, we can only import revisions
        # that form a linear path to qbase.
        # Otherwise, they should form a linear path to a head.
        heads = repo.changelog.heads(repo.changelog.node(rev[-1]))
        if len(heads) > 1:
            raise util.Abort(_('revision %d is the root of more than one '
                               'branch') % rev[-1])
        if self.applied:
            base = revlog.hex(repo.changelog.node(rev[0]))
            if base in [n.rev for n in self.applied]:
                raise util.Abort(_('revision %d is already managed')
                                 % rev[0])
            if heads != [revlog.bin(self.applied[-1].rev)]:
                raise util.Abort(_('revision %d is not the parent of '
                                   'the queue') % rev[0])
            base = repo.changelog.rev(revlog.bin(self.applied[0].rev))
            lastparent = repo.changelog.parentrevs(base)[0]
        else:
            if heads != [repo.changelog.node(rev[0])]:
                raise util.Abort(_('revision %d has unmanaged children')
                                 % rev[0])
            lastparent = None

        if git:
            self.diffopts().git = True

        for r in rev:
            p1, p2 = repo.changelog.parentrevs(r)
            n = repo.changelog.node(r)
            if p2 != revlog.nullrev:
                raise util.Abort(_('cannot import merge revision %d') % r)
            if lastparent and lastparent != r:
                raise util.Abort(_('revision %d is not the parent of %d')
                                 % (r, lastparent))
            lastparent = p1

            if not patchname:
                patchname = normname('%d.diff' % r)
            self.check_reserved_name(patchname)
            checkseries(patchname)
            checkfile(patchname)
            # Imported revisions go at the bottom of the stack: they
            # are already applied.
            self.full_series.insert(0, patchname)

            patchf = self.opener(patchname, "w")
            patch.export(repo, [n], fp=patchf, opts=self.diffopts())
            patchf.close()

            se = statusentry(revlog.hex(n), patchname)
            self.applied.insert(0, se)

            added.append(patchname)
            patchname = None
        self.parse_series()
        self.applied_dirty = 1

    for filename in files:
        if existing:
            if filename == '-':
                raise util.Abort(_('-e is incompatible with import from -'))
            if not patchname:
                patchname = normname(filename)
            self.check_reserved_name(patchname)
            if not os.path.isfile(self.join(patchname)):
                raise util.Abort(_("patch %s does not exist") % patchname)
        else:
            try:
                if filename == '-':
                    if not patchname:
                        raise util.Abort(_('need --name to import a patch from -'))
                    text = sys.stdin.read()
                else:
                    text = file(filename, 'rb').read()
            except IOError:
                # BUGFIX: report the source file name; patchname may
                # still be None/unset when the read fails.
                raise util.Abort(_("unable to read %s") % filename)
            if not patchname:
                patchname = normname(os.path.basename(filename))
            self.check_reserved_name(patchname)
            checkfile(patchname)
            patchf = self.opener(patchname, "w")
            patchf.write(text)
        checkseries(patchname)
        # New patches are inserted after the last applied entry.
        index = self.full_series_end() + i
        self.full_series[index:index] = [patchname]
        self.parse_series()
        self.ui.warn("adding %s to series file\n" % patchname)
        i += 1
        added.append(patchname)
        patchname = None
    self.series_dirty = 1
    qrepo = self.qrepo()
    if qrepo:
        qrepo.add(added)
1480
1480
def delete(ui, repo, *patches, **opts):
    """remove patches from queue

    The patches must not be applied, unless they are arguments to
    the --rev parameter. At least one patch or revision is required.

    With --rev, mq will stop managing the named revisions (converting
    them to regular mercurial changesets). The patches must be applied
    and at the base of the stack. This option is useful when the patches
    have been applied upstream.

    With --keep, the patch files are preserved in the patch directory."""
    # Delegate to the queue object, then persist series/status files.
    mq = repo.mq
    mq.delete(repo, patches, opts)
    mq.save_dirty()
    return 0
1497
1497
def applied(ui, repo, patch=None, **opts):
    """print the patches already applied"""
    q = repo.mq
    if not patch:
        end = q.series_end(True)
    else:
        if patch not in q.series:
            raise util.Abort(_("patch %s is not in series file") % patch)
        end = q.series.index(patch) + 1
    # List series entries [0, end) restricted to applied status.
    return q.qseries(repo, length=end, status='A',
                     summary=opts.get('summary'))
1508
1508
def unapplied(ui, repo, patch=None, **opts):
    """print the patches not yet applied"""
    q = repo.mq
    if patch:
        if patch not in q.series:
            raise util.Abort(_("patch %s is not in series file") % patch)
        start = q.series.index(patch) + 1
    else:
        start = q.series_end(True)
    # Propagate qseries' result, for consistency with applied().
    return q.qseries(repo, start=start, status='U',
                     summary=opts.get('summary'))
1519
1519
def qimport(ui, repo, *filename, **opts):
    """import a patch

    The patch will have the same name as its source file unless you
    give it a new one with --name.

    You can register an existing patch inside the patch directory
    with the --existing flag.

    With --force, an existing patch of the same name will be overwritten.

    An existing changeset may be placed under mq control with --rev
    (e.g. qimport --rev tip -n patch will place tip under mq control).
    With --git, patches imported with --rev will use the git diff
    format.
    """
    # Hand files (or revisions) to the queue object, then persist the
    # updated series/status files.
    mq = repo.mq
    mq.qimport(repo, filename, patchname=opts['name'], rev=opts['rev'],
               existing=opts['existing'], force=opts['force'],
               git=opts['git'])
    mq.save_dirty()
    return 0
1542
1542
def init(ui, repo, **opts):
    """init a new queue repository

    The queue repository is unversioned by default. If -c is
    specified, qinit will create a separate nested repository
    for patches (qinit -c may also be run later to convert
    an unversioned patch repository into a versioned one).
    You can use qcommit to commit changes to this queue repository."""
    q = repo.mq
    r = q.init(repo, create=opts['create_repo'])
    q.save_dirty()
    # Without -c there is no nested repository to seed.
    if not r:
        return 0
    if not os.path.exists(r.wjoin('.hgignore')):
        # Seed an ignore file so mq's control files stay untracked.
        fp = r.wopener('.hgignore', 'w')
        for line in ('^\\.hg\n', '^\\.mq\n', 'syntax: glob\n',
                     'status\n', 'guards\n'):
            fp.write(line)
        fp.close()
    if not os.path.exists(r.wjoin('series')):
        r.wopener('series', 'w').close()
    r.add(['.hgignore', 'series'])
    commands.add(ui, r)
    return 0
1568
1568
def clone(ui, source, dest=None, **opts):
    '''clone main and patch repository at same time

    If source is local, destination will have no patches applied. If
    source is remote, this command can not check if patches are
    applied in source, so cannot guarantee that patches are not
    applied in destination. If you clone remote repository, be sure
    before that it has no patches applied.

    Source patch repository is looked for in <src>/.hg/patches by
    default. Use -p <url> to change.

    The patch directory must be a nested mercurial repository, as
    would be created by qinit -c.
    '''
    def patchdir(repo):
        # Conventional location of the nested patch repository for a
        # given repo URL (see qinit -c).
        url = repo.url()
        if url.endswith('/'):
            url = url[:-1]
        return url + '/.hg/patches'
    # Propagate --ssh/--remotecmd style options into the config.
    cmdutil.setremoteconfig(ui, opts)
    if dest is None:
        dest = hg.defaultdest(source)
    sr = hg.repository(ui, ui.expandpath(source))
    patchespath = opts['patches'] or patchdir(sr)
    try:
        pr = hg.repository(ui, patchespath)
    except hg.RepoError:
        raise util.Abort(_('versioned patch repository not found'
                           ' (see qinit -c)'))
    qbase, destrev = None, None
    if sr.local():
        if sr.mq.applied:
            # Node of the first applied patch; everything from here up
            # is mq-managed history we do not want in the clone.
            qbase = revlog.bin(sr.mq.applied[0].rev)
            if not hg.islocal(dest):
                # Remote destination cannot be stripped afterwards, so
                # restrict the clone instead: take all heads that are
                # not descendants of qbase, plus qbase's parent.
                heads = dict.fromkeys(sr.heads())
                for h in sr.heads(qbase):
                    del heads[h]
                destrev = heads.keys()
                destrev.append(sr.changelog.parents(qbase)[0])
    ui.note(_('cloning main repo\n'))
    sr, dr = hg.clone(ui, sr.url(), dest,
                      pull=opts['pull'],
                      rev=destrev,
                      update=False,
                      stream=opts['uncompressed'])
    ui.note(_('cloning patch repo\n'))
    spr, dpr = hg.clone(ui, opts['patches'] or patchdir(sr), patchdir(dr),
                        pull=opts['pull'], update=not opts['noupdate'],
                        stream=opts['uncompressed'])
    if dr.local():
        if qbase:
            # Local destination: drop the applied-patch changesets so
            # the clone starts with an empty applied stack.
            ui.note(_('stripping applied patches from destination repo\n'))
            dr.mq.strip(dr, qbase, update=False, backup=None)
        if not opts['noupdate']:
            ui.note(_('updating destination repo\n'))
            hg.update(dr, dr.changelog.tip())
1626
1626
def commit(ui, repo, *pats, **opts):
    """commit changes in the queue repository"""
    # Commits go to the nested repository versioning the patch files
    # (created by qinit -c), not to the main repository.
    r = repo.mq.qrepo()
    if not r:
        raise util.Abort('no queue repository')
    commands.commit(r.ui, r, *pats, **opts)
1633
1633
def series(ui, repo, **opts):
    """print the entire series file"""
    q = repo.mq
    # qseries itself handles the --missing and --summary variants.
    q.qseries(repo, missing=opts['missing'], summary=opts['summary'])
    return 0
1638
1638
def top(ui, repo, **opts):
    """print the name of the current patch"""
    q = repo.mq
    # Index one past the topmost applied patch, or 0 when the stack
    # is empty.
    t = q.series_end(True) if q.applied else 0
    if not t:
        ui.write("No patches applied\n")
        return 1
    return q.qseries(repo, start=t - 1, length=1, status='A',
                     summary=opts.get('summary'))
1649
1649
def next(ui, repo, **opts):
    """print the name of the next patch"""
    q = repo.mq
    pos = q.series_end()
    # series_end() == len(series) means nothing is left to push.
    if pos == len(q.series):
        ui.write("All patches applied\n")
        return 1
    return q.qseries(repo, start=pos, length=1, summary=opts.get('summary'))
1658
1658
def prev(ui, repo, **opts):
    """print the name of the previous patch"""
    q = repo.mq
    count = len(q.applied)
    if count == 1:
        ui.write("Only one patch applied\n")
        return 1
    if count == 0:
        ui.write("No patches applied\n")
        return 1
    # The patch below the top one is at index count - 2.
    return q.qseries(repo, start=count - 2, length=1, status='A',
                     summary=opts.get('summary'))
1671
1671
def setupheaderopts(ui, opts):
    # Default --user/--date from --currentuser/--currentdate.
    # Note: username() and makedate() are evaluated eagerly, even when
    # the corresponding option was not requested (matches original).
    def fill(key, value):
        if not opts[key] and opts['current' + key]:
            opts[key] = value
    fill('user', ui.username())
    fill('date', "%d %d" % util.makedate())
1678
1678
def new(ui, repo, patch, *args, **opts):
    """create a new patch

    qnew creates a new patch on top of the currently-applied patch
    (if any). It will refuse to run if there are any outstanding
    changes unless -f is specified, in which case the patch will
    be initialised with them. You may also use -I, -X, and/or a list of
    files after the patch name to add only changes to matching files
    to the new patch, leaving the rest as uncommitted modifications.

    -e, -m or -l set the patch header as well as the commit message.
    If none is specified, the patch header is empty and the
    commit message is '[mq]: PATCH'"""
    q = repo.mq
    # Resolve -m/-l into one message, then open the editor if -e.
    msg = cmdutil.logmessage(opts)
    if opts['edit']:
        msg = ui.edit(msg, ui.username())
    opts['msg'] = msg
    # Fill in --currentuser/--currentdate defaults for the header.
    setupheaderopts(ui, opts)
    q.new(repo, patch, *args, **opts)
    q.save_dirty()
    return 0
1701
1701
def refresh(ui, repo, *pats, **opts):
    """update the current patch

    If any file patterns are provided, the refreshed patch will contain only
    the modifications that match those patterns; the remaining modifications
    will remain in the working directory.

    hg add/remove/copy/rename work as usual, though you might want to use
    git-style patches (--git or [diff] git=1) to track copies and renames.
    """
    q = repo.mq
    msg = cmdutil.logmessage(opts)
    if opts['edit']:
        # -e re-edits the existing header: needs an applied patch and
        # cannot be combined with an explicit -m/-l message.
        if not q.applied:
            ui.write(_("No patches applied\n"))
            return 1
        if msg:
            raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
        patchname = q.applied[-1].name
        headers = q.readheaders(patchname)
        user = headers[2]
        msg = ui.edit('\n'.join(headers[0]), user or ui.username())
    setupheaderopts(ui, opts)
    result = q.refresh(repo, pats, msg=msg, **opts)
    q.save_dirty()
    return result
1727
1727
def diff(ui, repo, *pats, **opts):
    """diff of the current patch"""
    q = repo.mq
    # Show the diff of the topmost applied patch, restricted to pats.
    q.diff(repo, pats, opts)
    return 0
1732
1732
def fold(ui, repo, *files, **opts):
    """fold the named patches into the current patch

    Patches must not yet be applied. Each patch will be successively
    applied to the current patch in the order given. If all the
    patches apply successfully, the current patch will be refreshed
    with the new cumulative patch, and the folded patches will
    be deleted. With -k/--keep, the folded patch files will not
    be removed afterwards.

    The header for each folded patch will be concatenated with
    the current patch header, separated by a line of '* * *'."""

    q = repo.mq

    if not files:
        raise util.Abort(_('qfold requires at least one patch name'))
    if not q.check_toppatch(repo):
        raise util.Abort(_('No patches applied'))

    message = cmdutil.logmessage(opts)
    if opts['edit']:
        if message:
            raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))

    # Resolve every name, rejecting applied patches outright.
    parent = q.lookup('qtip')
    patches = []
    messages = []
    for f in files:
        p = q.lookup(f)
        if p in patches or p == parent:
            # NOTE(review): warns about the duplicate but still falls
            # through and appends it below — looks like a missing
            # 'continue'; confirm intended behavior.
            ui.warn(_('Skipping already folded patch %s') % p)
        if q.isapplied(p):
            raise util.Abort(_('qfold cannot fold already applied patch %s') % p)
        patches.append(p)

    # Apply each patch on top of the working directory, collecting
    # headers for the merged commit message as we go.
    for p in patches:
        if not message:
            messages.append(q.readheaders(p)[0])
        pf = q.join(p)
        # NOTE: 'files' (the *files parameter) is rebound here to the
        # list of files touched by the patch.
        (patchsuccess, files, fuzz) = q.patch(repo, pf)
        if not patchsuccess:
            raise util.Abort(_('Error folding patch %s') % p)
        patch.updatedir(ui, repo, files)

    if not message:
        # Concatenate the parent's header with each folded patch's
        # header, separated by '* * *' lines.
        message, comments, user = q.readheaders(parent)[0:3]
        for msg in messages:
            message.append('* * *')
            message.extend(msg)
        message = '\n'.join(message)

    if opts['edit']:
        message = ui.edit(message, user or ui.username())

    # Refresh the top patch with the cumulative changes, then drop the
    # folded patch files (unless -k/--keep told delete to keep them).
    q.refresh(repo, msg=message)
    q.delete(repo, patches, opts)
    q.save_dirty()
1791
1791
def goto(ui, repo, patch, **opts):
    '''push or pop patches until named patch is at top of stack'''
    q = repo.mq
    target = q.lookup(patch)
    # Already applied -> pop down to it; otherwise push up to it.
    mover = q.pop if q.isapplied(target) else q.push
    result = mover(repo, target, force=opts['force'])
    q.save_dirty()
    return result
1802
1802
def guard(ui, repo, *args, **opts):
    '''set or print guards for a patch

    Guards control whether a patch can be pushed. A patch with no
    guards is always pushed. A patch with a positive guard ("+foo") is
    pushed only if the qselect command has activated it. A patch with
    a negative guard ("-foo") is never pushed if the qselect command
    has activated it.

    With no arguments, print the currently active guards.
    With arguments, set guards for the named patch.

    To set a negative guard "-foo" on topmost patch ("--" is needed so
    hg will not interpret "-foo" as an option):
      hg qguard -- -foo

    To set guards on another patch:
      hg qguard other.patch +2.6.17 -stable
    '''
    def status(idx):
        # Print "patchname: guard guard ..." (or "unguarded").
        guards = q.series_guards[idx] or ['unguarded']
        ui.write('%s: %s\n' % (q.series[idx], ' '.join(guards)))
    q = repo.mq
    patch = None
    args = list(args)
    if opts['list']:
        # -l/--list prints guards for every patch in the series and
        # accepts no other options or arguments.
        if args or opts['none']:
            raise util.Abort(_('cannot mix -l/--list with options or arguments'))
        for i in xrange(len(q.series)):
            status(i)
        return
    # When there are no arguments, or the first argument is itself a
    # guard (starts with '-' or '+'), the target patch defaults to the
    # topmost applied patch.
    if not args or args[0][0:1] in '-+':
        if not q.applied:
            raise util.Abort(_('no patches applied'))
        patch = q.applied[-1].name
    if patch is None and args[0][0:1] not in '-+':
        # First argument is a patch name; the rest are guards.
        patch = args.pop(0)
    if patch is None:
        raise util.Abort(_('no patch to work with'))
    if args or opts['none']:
        # Set (or, with --none, clear) the guards on that patch.
        idx = q.find_series(patch)
        if idx is None:
            raise util.Abort(_('no patch named %s') % patch)
        q.set_guards(idx, args)
        q.save_dirty()
    else:
        # No guard arguments: just print the patch's current guards.
        status(q.series.index(q.lookup(patch)))
1850
1850
def header(ui, repo, patch=None):
    """Print the header of the topmost or specified patch"""
    q = repo.mq
    if not patch:
        # No explicit patch: use the topmost applied one, if any.
        if not q.applied:
            ui.write('No patches applied\n')
            return 1
        patch = q.lookup('qtip')
    else:
        patch = q.lookup(patch)
    message = q.readheaders(patch)[0]
    ui.write('\n'.join(message) + '\n')
1865
1865
def lastsavename(path):
    """Return (filename, index) of the highest-numbered save of path.

    Saved queues live next to path as '<base>.<number>'.  Scan the
    directory for them and return the absolute name and number of the
    one with the largest number, or (None, None) when no save exists.
    """
    (directory, base) = os.path.split(path)
    names = os.listdir(directory)
    # Escape the base name and anchor the pattern.  The previous
    # pattern "%s.([0-9]+)" treated '.' (and any regex metacharacter
    # in base) as a wildcard, so e.g. 'patchesX5' wrongly matched
    # base 'patches', and trailing junk after the digits was accepted.
    namere = re.compile(r"%s\.([0-9]+)$" % re.escape(base))
    maxindex = None
    maxname = None
    for f in names:
        m = namere.match(f)
        if m:
            index = int(m.group(1))
            if maxindex is None or index > maxindex:
                maxindex = index
                maxname = f
    if maxname:
        return (os.path.join(directory, maxname), maxindex)
    return (None, None)
1881 return (None, None)
1882
1882
def savename(path):
    """Return the next unused save name derived from path."""
    last, index = lastsavename(path)
    if last is None:
        # No previous save: start numbering at 1.
        index = 0
    return "%s.%d" % (path, index + 1)
1889
1889
def push(ui, repo, patch=None, **opts):
    """push the next patch onto the stack"""
    q = repo.mq
    mergeq = None

    if opts['all']:
        # --all: push every remaining patch, i.e. up to the last entry
        # in the series file.
        if not q.series:
            ui.warn(_('no patches in series\n'))
            return 0
        patch = q.series[-1]
    if opts['merge']:
        # --merge: merge against a previously saved queue (see qsave),
        # either the one named by -n or the most recent save.
        if opts['name']:
            newpath = opts['name']
        else:
            newpath, i = lastsavename(q.path)
        if not newpath:
            ui.warn("no saved queues found, please use -n\n")
            return 1
        mergeq = queue(ui, repo.join(""), newpath)
        ui.warn("merging with queue at: %s\n" % mergeq.path)
    ret = q.push(repo, patch, force=opts['force'], list=opts['list'],
                 mergeq=mergeq)
    return ret
1913
1913
def pop(ui, repo, patch=None, **opts):
    """pop the current patch off the stack"""
    name = opts['name']
    if name:
        # -n names a saved queue; popping it must not touch the
        # working directory of the main repository.
        q = queue(ui, repo.join(""), repo.join(name))
        ui.warn('using patch queue: %s\n' % q.path)
        update_working = False
    else:
        q = repo.mq
        update_working = True
    result = q.pop(repo, patch, force=opts['force'], update=update_working,
                   all=opts['all'])
    q.save_dirty()
    return result
1927
1927
def rename(ui, repo, patch, name=None, **opts):
    """rename a patch

    With one argument, renames the current patch to PATCH1.
    With two arguments, renames PATCH1 to PATCH2."""

    q = repo.mq

    # One argument: it is the new name, and the source defaults to
    # the topmost applied patch.
    if not name:
        name = patch
        patch = None

    if patch:
        patch = q.lookup(patch)
    else:
        if not q.applied:
            ui.write(_('No patches applied\n'))
            return
        patch = q.lookup('qtip')
    absdest = q.join(name)
    if os.path.isdir(absdest):
        # Renaming "into" a directory keeps the original basename.
        name = normname(os.path.join(name, os.path.basename(patch)))
        absdest = q.join(name)
    if os.path.exists(absdest):
        raise util.Abort(_('%s already exists') % absdest)

    if name in q.series:
        raise util.Abort(_('A patch named %s already exists in the series file') % name)

    if ui.verbose:
        ui.write('Renaming %s to %s\n' % (patch, name))
    # Rewrite the series entry, preserving any '#guard' annotations.
    i = q.find_series(patch)
    guards = q.guard_re.findall(q.full_series[i])
    q.full_series[i] = name + ''.join([' #' + g for g in guards])
    q.parse_series()
    q.series_dirty = 1

    # If the patch is applied, update the status entry to the new name.
    info = q.isapplied(patch)
    if info:
        q.applied[info[0]] = statusentry(info[1], name)
        q.applied_dirty = 1

    util.rename(q.join(patch), absdest)
    # Mirror the rename in the versioned patch repository (qinit -c),
    # recording it as a copy + remove so history is preserved.
    r = q.qrepo()
    if r:
        wlock = r.wlock()
        try:
            if r.dirstate[name] == 'r':
                r.undelete([name])
            r.copy(patch, name)
            r.remove([patch], False)
        finally:
            # Deleting the reference releases the wlock.
            del wlock

    q.save_dirty()
1983
1983
def restore(ui, repo, rev, **opts):
    """restore the queue state saved by a rev"""
    node = repo.lookup(rev)
    q = repo.mq
    q.restore(repo, node, delete=opts['delete'],
              qupdate=opts['update'])
    q.save_dirty()
    return 0
1992
1992
def save(ui, repo, **opts):
    """save current queue state"""
    q = repo.mq
    message = cmdutil.logmessage(opts)
    ret = q.save(repo, msg=message)
    if ret:
        return ret
    q.save_dirty()
    if opts['copy']:
        # -c/--copy: duplicate the whole patch directory, either to the
        # name given with -n or to an auto-numbered save name.
        path = q.path
        if opts['name']:
            newpath = os.path.join(q.basepath, opts['name'])
            if os.path.exists(newpath):
                if not os.path.isdir(newpath):
                    raise util.Abort(_('destination %s exists and is not '
                                       'a directory') % newpath)
                if not opts['force']:
                    raise util.Abort(_('destination %s exists, '
                                       'use -f to force') % newpath)
        else:
            newpath = savename(path)
        ui.warn("copy %s to %s\n" % (path, newpath))
        util.copyfiles(path, newpath)
    if opts['empty']:
        try:
            os.unlink(q.join(q.status_path))
        except OSError:
            # Best-effort removal: the status file may already be gone.
            # (Was a bare except, which also swallowed KeyboardInterrupt
            # and SystemExit.)
            pass
    return 0
2022
2022
def strip(ui, repo, rev, **opts):
    """strip a revision and all later revs on the same branch"""
    node = repo.lookup(rev)
    # --backup keeps only the stripped revs; --nobackup keeps nothing;
    # the default backs up everything.
    if opts['backup']:
        backup = 'strip'
    elif opts['nobackup']:
        backup = 'none'
    else:
        backup = 'all'
    # Only move the working directory if it is not already at null.
    update = repo.dirstate.parents()[0] != revlog.nullid
    repo.mq.strip(repo, node, backup=backup, update=update)
    return 0
2034
2034
2035 def select(ui, repo, *args, **opts):
2035 def select(ui, repo, *args, **opts):
2036 '''set or print guarded patches to push
2036 '''set or print guarded patches to push
2037
2037
2038 Use the qguard command to set or print guards on patch, then use
2038 Use the qguard command to set or print guards on patch, then use
2039 qselect to tell mq which guards to use. A patch will be pushed if it
2039 qselect to tell mq which guards to use. A patch will be pushed if it
2040 has no guards or any positive guards match the currently selected guard,
2040 has no guards or any positive guards match the currently selected guard,
2041 but will not be pushed if any negative guards match the current guard.
2041 but will not be pushed if any negative guards match the current guard.
2042 For example:
2042 For example:
2043
2043
2044 qguard foo.patch -stable (negative guard)
2044 qguard foo.patch -stable (negative guard)
2045 qguard bar.patch +stable (positive guard)
2045 qguard bar.patch +stable (positive guard)
2046 qselect stable
2046 qselect stable
2047
2047
2048 This activates the "stable" guard. mq will skip foo.patch (because
2048 This activates the "stable" guard. mq will skip foo.patch (because
2049 it has a negative match) but push bar.patch (because it
2049 it has a negative match) but push bar.patch (because it
2050 has a positive match).
2050 has a positive match).
2051
2051
2052 With no arguments, prints the currently active guards.
2052 With no arguments, prints the currently active guards.
2053 With one argument, sets the active guard.
2053 With one argument, sets the active guard.
2054
2054
2055 Use -n/--none to deactivate guards (no other arguments needed).
2055 Use -n/--none to deactivate guards (no other arguments needed).
2056 When no guards are active, patches with positive guards are skipped
2056 When no guards are active, patches with positive guards are skipped
2057 and patches with negative guards are pushed.
2057 and patches with negative guards are pushed.
2058
2058
2059 qselect can change the guards on applied patches. It does not pop
2059 qselect can change the guards on applied patches. It does not pop
2060 guarded patches by default. Use --pop to pop back to the last applied
2060 guarded patches by default. Use --pop to pop back to the last applied
2061 patch that is not guarded. Use --reapply (which implies --pop) to push
2061 patch that is not guarded. Use --reapply (which implies --pop) to push
2062 back to the current patch afterwards, but skip guarded patches.
2062 back to the current patch afterwards, but skip guarded patches.
2063
2063
2064 Use -s/--series to print a list of all guards in the series file (no
2064 Use -s/--series to print a list of all guards in the series file (no
2065 other arguments needed). Use -v for more information.'''
2065 other arguments needed). Use -v for more information.'''
2066
2066
2067 q = repo.mq
2067 q = repo.mq
2068 guards = q.active()
2068 guards = q.active()
2069 if args or opts['none']:
2069 if args or opts['none']:
2070 old_unapplied = q.unapplied(repo)
2070 old_unapplied = q.unapplied(repo)
2071 old_guarded = [i for i in xrange(len(q.applied)) if
2071 old_guarded = [i for i in xrange(len(q.applied)) if
2072 not q.pushable(i)[0]]
2072 not q.pushable(i)[0]]
2073 q.set_active(args)
2073 q.set_active(args)
2074 q.save_dirty()
2074 q.save_dirty()
2075 if not args:
2075 if not args:
2076 ui.status(_('guards deactivated\n'))
2076 ui.status(_('guards deactivated\n'))
2077 if not opts['pop'] and not opts['reapply']:
2077 if not opts['pop'] and not opts['reapply']:
2078 unapplied = q.unapplied(repo)
2078 unapplied = q.unapplied(repo)
2079 guarded = [i for i in xrange(len(q.applied))
2079 guarded = [i for i in xrange(len(q.applied))
2080 if not q.pushable(i)[0]]
2080 if not q.pushable(i)[0]]
2081 if len(unapplied) != len(old_unapplied):
2081 if len(unapplied) != len(old_unapplied):
2082 ui.status(_('number of unguarded, unapplied patches has '
2082 ui.status(_('number of unguarded, unapplied patches has '
2083 'changed from %d to %d\n') %
2083 'changed from %d to %d\n') %
2084 (len(old_unapplied), len(unapplied)))
2084 (len(old_unapplied), len(unapplied)))
2085 if len(guarded) != len(old_guarded):
2085 if len(guarded) != len(old_guarded):
2086 ui.status(_('number of guarded, applied patches has changed '
2086 ui.status(_('number of guarded, applied patches has changed '
2087 'from %d to %d\n') %
2087 'from %d to %d\n') %
2088 (len(old_guarded), len(guarded)))
2088 (len(old_guarded), len(guarded)))
2089 elif opts['series']:
2089 elif opts['series']:
2090 guards = {}
2090 guards = {}
2091 noguards = 0
2091 noguards = 0
2092 for gs in q.series_guards:
2092 for gs in q.series_guards:
2093 if not gs:
2093 if not gs:
2094 noguards += 1
2094 noguards += 1
2095 for g in gs:
2095 for g in gs:
2096 guards.setdefault(g, 0)
2096 guards.setdefault(g, 0)
2097 guards[g] += 1
2097 guards[g] += 1
2098 if ui.verbose:
2098 if ui.verbose:
2099 guards['NONE'] = noguards
2099 guards['NONE'] = noguards
2100 guards = guards.items()
2100 guards = guards.items()
2101 guards.sort(lambda a, b: cmp(a[0][1:], b[0][1:]))
2101 guards.sort(lambda a, b: cmp(a[0][1:], b[0][1:]))
2102 if guards:
2102 if guards:
2103 ui.note(_('guards in series file:\n'))
2103 ui.note(_('guards in series file:\n'))
2104 for guard, count in guards:
2104 for guard, count in guards:
2105 ui.note('%2d ' % count)
2105 ui.note('%2d ' % count)
2106 ui.write(guard, '\n')
2106 ui.write(guard, '\n')
2107 else:
2107 else:
2108 ui.note(_('no guards in series file\n'))
2108 ui.note(_('no guards in series file\n'))
2109 else:
2109 else:
2110 if guards:
2110 if guards:
2111 ui.note(_('active guards:\n'))
2111 ui.note(_('active guards:\n'))
2112 for g in guards:
2112 for g in guards:
2113 ui.write(g, '\n')
2113 ui.write(g, '\n')
2114 else:
2114 else:
2115 ui.write(_('no active guards\n'))
2115 ui.write(_('no active guards\n'))
2116 reapply = opts['reapply'] and q.applied and q.appliedname(-1)
2116 reapply = opts['reapply'] and q.applied and q.appliedname(-1)
2117 popped = False
2117 popped = False
2118 if opts['pop'] or opts['reapply']:
2118 if opts['pop'] or opts['reapply']:
2119 for i in xrange(len(q.applied)):
2119 for i in xrange(len(q.applied)):
2120 pushable, reason = q.pushable(i)
2120 pushable, reason = q.pushable(i)
2121 if not pushable:
2121 if not pushable:
2122 ui.status(_('popping guarded patches\n'))
2122 ui.status(_('popping guarded patches\n'))
2123 popped = True
2123 popped = True
2124 if i == 0:
2124 if i == 0:
2125 q.pop(repo, all=True)
2125 q.pop(repo, all=True)
2126 else:
2126 else:
2127 q.pop(repo, i-1)
2127 q.pop(repo, i-1)
2128 break
2128 break
2129 if popped:
2129 if popped:
2130 try:
2130 try:
2131 if reapply:
2131 if reapply:
2132 ui.status(_('reapplying unguarded patches\n'))
2132 ui.status(_('reapplying unguarded patches\n'))
2133 q.push(repo, reapply)
2133 q.push(repo, reapply)
2134 finally:
2134 finally:
2135 q.save_dirty()
2135 q.save_dirty()
2136
2136
2137 def reposetup(ui, repo):
2137 def reposetup(ui, repo):
2138 class mqrepo(repo.__class__):
2138 class mqrepo(repo.__class__):
2139 def abort_if_wdir_patched(self, errmsg, force=False):
2139 def abort_if_wdir_patched(self, errmsg, force=False):
2140 if self.mq.applied and not force:
2140 if self.mq.applied and not force:
2141 parent = revlog.hex(self.dirstate.parents()[0])
2141 parent = revlog.hex(self.dirstate.parents()[0])
2142 if parent in [s.rev for s in self.mq.applied]:
2142 if parent in [s.rev for s in self.mq.applied]:
2143 raise util.Abort(errmsg)
2143 raise util.Abort(errmsg)
2144
2144
2145 def commit(self, *args, **opts):
2145 def commit(self, *args, **opts):
2146 if len(args) >= 6:
2146 if len(args) >= 6:
2147 force = args[5]
2147 force = args[5]
2148 else:
2148 else:
2149 force = opts.get('force')
2149 force = opts.get('force')
2150 self.abort_if_wdir_patched(
2150 self.abort_if_wdir_patched(
2151 _('cannot commit over an applied mq patch'),
2151 _('cannot commit over an applied mq patch'),
2152 force)
2152 force)
2153
2153
2154 return super(mqrepo, self).commit(*args, **opts)
2154 return super(mqrepo, self).commit(*args, **opts)
2155
2155
2156 def push(self, remote, force=False, revs=None):
2156 def push(self, remote, force=False, revs=None):
2157 if self.mq.applied and not force and not revs:
2157 if self.mq.applied and not force and not revs:
2158 raise util.Abort(_('source has mq patches applied'))
2158 raise util.Abort(_('source has mq patches applied'))
2159 return super(mqrepo, self).push(remote, force, revs)
2159 return super(mqrepo, self).push(remote, force, revs)
2160
2160
2161 def tags(self):
2161 def tags(self):
2162 if self.tagscache:
2162 if self.tagscache:
2163 return self.tagscache
2163 return self.tagscache
2164
2164
2165 tagscache = super(mqrepo, self).tags()
2165 tagscache = super(mqrepo, self).tags()
2166
2166
2167 q = self.mq
2167 q = self.mq
2168 if not q.applied:
2168 if not q.applied:
2169 return tagscache
2169 return tagscache
2170
2170
2171 mqtags = [(revlog.bin(patch.rev), patch.name) for patch in q.applied]
2171 mqtags = [(revlog.bin(patch.rev), patch.name) for patch in q.applied]
2172
2172
2173 if mqtags[-1][0] not in self.changelog.nodemap:
2173 if mqtags[-1][0] not in self.changelog.nodemap:
2174 self.ui.warn('mq status file refers to unknown node %s\n'
2174 self.ui.warn('mq status file refers to unknown node %s\n'
2175 % revlog.short(mqtags[-1][0]))
2175 % revlog.short(mqtags[-1][0]))
2176 return tagscache
2176 return tagscache
2177
2177
2178 mqtags.append((mqtags[-1][0], 'qtip'))
2178 mqtags.append((mqtags[-1][0], 'qtip'))
2179 mqtags.append((mqtags[0][0], 'qbase'))
2179 mqtags.append((mqtags[0][0], 'qbase'))
2180 mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent'))
2180 mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent'))
2181 for patch in mqtags:
2181 for patch in mqtags:
2182 if patch[1] in tagscache:
2182 if patch[1] in tagscache:
2183 self.ui.warn('Tag %s overrides mq patch of the same name\n' % patch[1])
2183 self.ui.warn('Tag %s overrides mq patch of the same name\n' % patch[1])
2184 else:
2184 else:
2185 tagscache[patch[1]] = patch[0]
2185 tagscache[patch[1]] = patch[0]
2186
2186
2187 return tagscache
2187 return tagscache
2188
2188
2189 def _branchtags(self):
2189 def _branchtags(self, partial, lrev):
2190 q = self.mq
2190 q = self.mq
2191 if not q.applied:
2191 if not q.applied:
2192 return super(mqrepo, self)._branchtags()
2192 return super(mqrepo, self)._branchtags(partial, lrev)
2193
2193
2194 cl = self.changelog
2194 cl = self.changelog
2195 qbasenode = revlog.bin(q.applied[0].rev)
2195 qbasenode = revlog.bin(q.applied[0].rev)
2196 if qbasenode not in cl.nodemap:
2196 if qbasenode not in cl.nodemap:
2197 self.ui.warn('mq status file refers to unknown node %s\n'
2197 self.ui.warn('mq status file refers to unknown node %s\n'
2198 % revlog.short(qbasenode))
2198 % revlog.short(qbasenode))
2199 return super(mqrepo, self)._branchtags()
2199 return super(mqrepo, self)._branchtags(partial, lrev)
2200
2201 self.branchcache = {} # avoid recursion in changectx
2202 partial, last, lrev = self._readbranchcache()
2203
2200
2204 qbase = cl.rev(qbasenode)
2201 qbase = cl.rev(qbasenode)
2205 start = lrev + 1
2202 start = lrev + 1
2206 if start < qbase:
2203 if start < qbase:
2207 # update the cache (excluding the patches) and save it
2204 # update the cache (excluding the patches) and save it
2208 self._updatebranchcache(partial, lrev+1, qbase)
2205 self._updatebranchcache(partial, lrev+1, qbase)
2209 self._writebranchcache(partial, cl.node(qbase-1), qbase-1)
2206 self._writebranchcache(partial, cl.node(qbase-1), qbase-1)
2210 start = qbase
2207 start = qbase
2211 # if start = qbase, the cache is as updated as it should be.
2208 # if start = qbase, the cache is as updated as it should be.
2212 # if start > qbase, the cache includes (part of) the patches.
2209 # if start > qbase, the cache includes (part of) the patches.
2213 # we might as well use it, but we won't save it.
2210 # we might as well use it, but we won't save it.
2214
2211
2215 # update the cache up to the tip
2212 # update the cache up to the tip
2216 self._updatebranchcache(partial, start, cl.count())
2213 self._updatebranchcache(partial, start, cl.count())
2217
2214
2218 return partial
2215 return partial
2219
2216
2220 if repo.local():
2217 if repo.local():
2221 repo.__class__ = mqrepo
2218 repo.__class__ = mqrepo
2222 repo.mq = queue(ui, repo.join(""))
2219 repo.mq = queue(ui, repo.join(""))
2223
2220
2224 seriesopts = [('s', 'summary', None, _('print first line of patch header'))]
2221 seriesopts = [('s', 'summary', None, _('print first line of patch header'))]
2225
2222
2226 headeropts = [
2223 headeropts = [
2227 ('U', 'currentuser', None, _('add "From: <current user>" to patch')),
2224 ('U', 'currentuser', None, _('add "From: <current user>" to patch')),
2228 ('u', 'user', '', _('add "From: <given user>" to patch')),
2225 ('u', 'user', '', _('add "From: <given user>" to patch')),
2229 ('D', 'currentdate', None, _('add "Date: <current date>" to patch')),
2226 ('D', 'currentdate', None, _('add "Date: <current date>" to patch')),
2230 ('d', 'date', '', _('add "Date: <given date>" to patch'))]
2227 ('d', 'date', '', _('add "Date: <given date>" to patch'))]
2231
2228
2232 cmdtable = {
2229 cmdtable = {
2233 "qapplied": (applied, [] + seriesopts, _('hg qapplied [-s] [PATCH]')),
2230 "qapplied": (applied, [] + seriesopts, _('hg qapplied [-s] [PATCH]')),
2234 "qclone":
2231 "qclone":
2235 (clone,
2232 (clone,
2236 [('', 'pull', None, _('use pull protocol to copy metadata')),
2233 [('', 'pull', None, _('use pull protocol to copy metadata')),
2237 ('U', 'noupdate', None, _('do not update the new working directories')),
2234 ('U', 'noupdate', None, _('do not update the new working directories')),
2238 ('', 'uncompressed', None,
2235 ('', 'uncompressed', None,
2239 _('use uncompressed transfer (fast over LAN)')),
2236 _('use uncompressed transfer (fast over LAN)')),
2240 ('p', 'patches', '', _('location of source patch repo')),
2237 ('p', 'patches', '', _('location of source patch repo')),
2241 ] + commands.remoteopts,
2238 ] + commands.remoteopts,
2242 _('hg qclone [OPTION]... SOURCE [DEST]')),
2239 _('hg qclone [OPTION]... SOURCE [DEST]')),
2243 "qcommit|qci":
2240 "qcommit|qci":
2244 (commit,
2241 (commit,
2245 commands.table["^commit|ci"][1],
2242 commands.table["^commit|ci"][1],
2246 _('hg qcommit [OPTION]... [FILE]...')),
2243 _('hg qcommit [OPTION]... [FILE]...')),
2247 "^qdiff":
2244 "^qdiff":
2248 (diff,
2245 (diff,
2249 [('g', 'git', None, _('use git extended diff format')),
2246 [('g', 'git', None, _('use git extended diff format')),
2250 ('U', 'unified', 3, _('number of lines of context to show')),
2247 ('U', 'unified', 3, _('number of lines of context to show')),
2251 ] + commands.walkopts,
2248 ] + commands.walkopts,
2252 _('hg qdiff [-I] [-X] [-U NUM] [-g] [FILE]...')),
2249 _('hg qdiff [-I] [-X] [-U NUM] [-g] [FILE]...')),
2253 "qdelete|qremove|qrm":
2250 "qdelete|qremove|qrm":
2254 (delete,
2251 (delete,
2255 [('k', 'keep', None, _('keep patch file')),
2252 [('k', 'keep', None, _('keep patch file')),
2256 ('r', 'rev', [], _('stop managing a revision'))],
2253 ('r', 'rev', [], _('stop managing a revision'))],
2257 _('hg qdelete [-k] [-r REV]... [PATCH]...')),
2254 _('hg qdelete [-k] [-r REV]... [PATCH]...')),
2258 'qfold':
2255 'qfold':
2259 (fold,
2256 (fold,
2260 [('e', 'edit', None, _('edit patch header')),
2257 [('e', 'edit', None, _('edit patch header')),
2261 ('k', 'keep', None, _('keep folded patch files')),
2258 ('k', 'keep', None, _('keep folded patch files')),
2262 ] + commands.commitopts,
2259 ] + commands.commitopts,
2263 _('hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...')),
2260 _('hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...')),
2264 'qgoto':
2261 'qgoto':
2265 (goto,
2262 (goto,
2266 [('f', 'force', None, _('overwrite any local changes'))],
2263 [('f', 'force', None, _('overwrite any local changes'))],
2267 _('hg qgoto [OPTION]... PATCH')),
2264 _('hg qgoto [OPTION]... PATCH')),
2268 'qguard':
2265 'qguard':
2269 (guard,
2266 (guard,
2270 [('l', 'list', None, _('list all patches and guards')),
2267 [('l', 'list', None, _('list all patches and guards')),
2271 ('n', 'none', None, _('drop all guards'))],
2268 ('n', 'none', None, _('drop all guards'))],
2272 _('hg qguard [-l] [-n] [PATCH] [+GUARD]... [-GUARD]...')),
2269 _('hg qguard [-l] [-n] [PATCH] [+GUARD]... [-GUARD]...')),
2273 'qheader': (header, [], _('hg qheader [PATCH]')),
2270 'qheader': (header, [], _('hg qheader [PATCH]')),
2274 "^qimport":
2271 "^qimport":
2275 (qimport,
2272 (qimport,
2276 [('e', 'existing', None, 'import file in patch dir'),
2273 [('e', 'existing', None, 'import file in patch dir'),
2277 ('n', 'name', '', 'patch file name'),
2274 ('n', 'name', '', 'patch file name'),
2278 ('f', 'force', None, 'overwrite existing files'),
2275 ('f', 'force', None, 'overwrite existing files'),
2279 ('r', 'rev', [], 'place existing revisions under mq control'),
2276 ('r', 'rev', [], 'place existing revisions under mq control'),
2280 ('g', 'git', None, _('use git extended diff format'))],
2277 ('g', 'git', None, _('use git extended diff format'))],
2281 _('hg qimport [-e] [-n NAME] [-f] [-g] [-r REV]... FILE...')),
2278 _('hg qimport [-e] [-n NAME] [-f] [-g] [-r REV]... FILE...')),
2282 "^qinit":
2279 "^qinit":
2283 (init,
2280 (init,
2284 [('c', 'create-repo', None, 'create queue repository')],
2281 [('c', 'create-repo', None, 'create queue repository')],
2285 _('hg qinit [-c]')),
2282 _('hg qinit [-c]')),
2286 "qnew":
2283 "qnew":
2287 (new,
2284 (new,
2288 [('e', 'edit', None, _('edit commit message')),
2285 [('e', 'edit', None, _('edit commit message')),
2289 ('f', 'force', None, _('import uncommitted changes into patch')),
2286 ('f', 'force', None, _('import uncommitted changes into patch')),
2290 ('g', 'git', None, _('use git extended diff format')),
2287 ('g', 'git', None, _('use git extended diff format')),
2291 ] + commands.walkopts + commands.commitopts + headeropts,
2288 ] + commands.walkopts + commands.commitopts + headeropts,
2292 _('hg qnew [-e] [-m TEXT] [-l FILE] [-f] PATCH [FILE]...')),
2289 _('hg qnew [-e] [-m TEXT] [-l FILE] [-f] PATCH [FILE]...')),
2293 "qnext": (next, [] + seriesopts, _('hg qnext [-s]')),
2290 "qnext": (next, [] + seriesopts, _('hg qnext [-s]')),
2294 "qprev": (prev, [] + seriesopts, _('hg qprev [-s]')),
2291 "qprev": (prev, [] + seriesopts, _('hg qprev [-s]')),
2295 "^qpop":
2292 "^qpop":
2296 (pop,
2293 (pop,
2297 [('a', 'all', None, _('pop all patches')),
2294 [('a', 'all', None, _('pop all patches')),
2298 ('n', 'name', '', _('queue name to pop')),
2295 ('n', 'name', '', _('queue name to pop')),
2299 ('f', 'force', None, _('forget any local changes'))],
2296 ('f', 'force', None, _('forget any local changes'))],
2300 _('hg qpop [-a] [-n NAME] [-f] [PATCH | INDEX]')),
2297 _('hg qpop [-a] [-n NAME] [-f] [PATCH | INDEX]')),
2301 "^qpush":
2298 "^qpush":
2302 (push,
2299 (push,
2303 [('f', 'force', None, _('apply if the patch has rejects')),
2300 [('f', 'force', None, _('apply if the patch has rejects')),
2304 ('l', 'list', None, _('list patch name in commit text')),
2301 ('l', 'list', None, _('list patch name in commit text')),
2305 ('a', 'all', None, _('apply all patches')),
2302 ('a', 'all', None, _('apply all patches')),
2306 ('m', 'merge', None, _('merge from another queue')),
2303 ('m', 'merge', None, _('merge from another queue')),
2307 ('n', 'name', '', _('merge queue name'))],
2304 ('n', 'name', '', _('merge queue name'))],
2308 _('hg qpush [-f] [-l] [-a] [-m] [-n NAME] [PATCH | INDEX]')),
2305 _('hg qpush [-f] [-l] [-a] [-m] [-n NAME] [PATCH | INDEX]')),
2309 "^qrefresh":
2306 "^qrefresh":
2310 (refresh,
2307 (refresh,
2311 [('e', 'edit', None, _('edit commit message')),
2308 [('e', 'edit', None, _('edit commit message')),
2312 ('g', 'git', None, _('use git extended diff format')),
2309 ('g', 'git', None, _('use git extended diff format')),
2313 ('s', 'short', None, _('refresh only files already in the patch')),
2310 ('s', 'short', None, _('refresh only files already in the patch')),
2314 ] + commands.walkopts + commands.commitopts + headeropts,
2311 ] + commands.walkopts + commands.commitopts + headeropts,
2315 _('hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...')),
2312 _('hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...')),
2316 'qrename|qmv':
2313 'qrename|qmv':
2317 (rename, [], _('hg qrename PATCH1 [PATCH2]')),
2314 (rename, [], _('hg qrename PATCH1 [PATCH2]')),
2318 "qrestore":
2315 "qrestore":
2319 (restore,
2316 (restore,
2320 [('d', 'delete', None, _('delete save entry')),
2317 [('d', 'delete', None, _('delete save entry')),
2321 ('u', 'update', None, _('update queue working dir'))],
2318 ('u', 'update', None, _('update queue working dir'))],
2322 _('hg qrestore [-d] [-u] REV')),
2319 _('hg qrestore [-d] [-u] REV')),
2323 "qsave":
2320 "qsave":
2324 (save,
2321 (save,
2325 [('c', 'copy', None, _('copy patch directory')),
2322 [('c', 'copy', None, _('copy patch directory')),
2326 ('n', 'name', '', _('copy directory name')),
2323 ('n', 'name', '', _('copy directory name')),
2327 ('e', 'empty', None, _('clear queue status file')),
2324 ('e', 'empty', None, _('clear queue status file')),
2328 ('f', 'force', None, _('force copy'))] + commands.commitopts,
2325 ('f', 'force', None, _('force copy'))] + commands.commitopts,
2329 _('hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]')),
2326 _('hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]')),
2330 "qselect":
2327 "qselect":
2331 (select,
2328 (select,
2332 [('n', 'none', None, _('disable all guards')),
2329 [('n', 'none', None, _('disable all guards')),
2333 ('s', 'series', None, _('list all guards in series file')),
2330 ('s', 'series', None, _('list all guards in series file')),
2334 ('', 'pop', None, _('pop to before first guarded applied patch')),
2331 ('', 'pop', None, _('pop to before first guarded applied patch')),
2335 ('', 'reapply', None, _('pop, then reapply patches'))],
2332 ('', 'reapply', None, _('pop, then reapply patches'))],
2336 _('hg qselect [OPTION]... [GUARD]...')),
2333 _('hg qselect [OPTION]... [GUARD]...')),
2337 "qseries":
2334 "qseries":
2338 (series,
2335 (series,
2339 [('m', 'missing', None, _('print patches not in series')),
2336 [('m', 'missing', None, _('print patches not in series')),
2340 ] + seriesopts,
2337 ] + seriesopts,
2341 _('hg qseries [-ms]')),
2338 _('hg qseries [-ms]')),
2342 "^strip":
2339 "^strip":
2343 (strip,
2340 (strip,
2344 [('f', 'force', None, _('force multi-head removal')),
2341 [('f', 'force', None, _('force multi-head removal')),
2345 ('b', 'backup', None, _('bundle unrelated changesets')),
2342 ('b', 'backup', None, _('bundle unrelated changesets')),
2346 ('n', 'nobackup', None, _('no backups'))],
2343 ('n', 'nobackup', None, _('no backups'))],
2347 _('hg strip [-f] [-b] [-n] REV')),
2344 _('hg strip [-f] [-b] [-n] REV')),
2348 "qtop": (top, [] + seriesopts, _('hg qtop [-s]')),
2345 "qtop": (top, [] + seriesopts, _('hg qtop [-s]')),
2349 "qunapplied": (unapplied, [] + seriesopts, _('hg qunapplied [-s] [PATCH]')),
2346 "qunapplied": (unapplied, [] + seriesopts, _('hg qunapplied [-s] [PATCH]')),
2350 }
2347 }
@@ -1,2102 +1,2117
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import _
9 from i18n import _
10 import repo, changegroup
10 import repo, changegroup
11 import changelog, dirstate, filelog, manifest, context, weakref
11 import changelog, dirstate, filelog, manifest, context, weakref
12 import re, lock, transaction, tempfile, stat, errno, ui
12 import re, lock, transaction, tempfile, stat, errno, ui
13 import os, revlog, time, util, extensions, hook, inspect
13 import os, revlog, time, util, extensions, hook, inspect
14
14
15 class localrepository(repo.repository):
15 class localrepository(repo.repository):
16 capabilities = util.set(('lookup', 'changegroupsubset'))
16 capabilities = util.set(('lookup', 'changegroupsubset'))
17 supported = ('revlogv1', 'store')
17 supported = ('revlogv1', 'store')
18
18
19 def __init__(self, parentui, path=None, create=0):
19 def __init__(self, parentui, path=None, create=0):
20 repo.repository.__init__(self)
20 repo.repository.__init__(self)
21 self.root = os.path.realpath(path)
21 self.root = os.path.realpath(path)
22 self.path = os.path.join(self.root, ".hg")
22 self.path = os.path.join(self.root, ".hg")
23 self.origroot = path
23 self.origroot = path
24 self.opener = util.opener(self.path)
24 self.opener = util.opener(self.path)
25 self.wopener = util.opener(self.root)
25 self.wopener = util.opener(self.root)
26
26
27 if not os.path.isdir(self.path):
27 if not os.path.isdir(self.path):
28 if create:
28 if create:
29 if not os.path.exists(path):
29 if not os.path.exists(path):
30 os.mkdir(path)
30 os.mkdir(path)
31 os.mkdir(self.path)
31 os.mkdir(self.path)
32 requirements = ["revlogv1"]
32 requirements = ["revlogv1"]
33 if parentui.configbool('format', 'usestore', True):
33 if parentui.configbool('format', 'usestore', True):
34 os.mkdir(os.path.join(self.path, "store"))
34 os.mkdir(os.path.join(self.path, "store"))
35 requirements.append("store")
35 requirements.append("store")
36 # create an invalid changelog
36 # create an invalid changelog
37 self.opener("00changelog.i", "a").write(
37 self.opener("00changelog.i", "a").write(
38 '\0\0\0\2' # represents revlogv2
38 '\0\0\0\2' # represents revlogv2
39 ' dummy changelog to prevent using the old repo layout'
39 ' dummy changelog to prevent using the old repo layout'
40 )
40 )
41 reqfile = self.opener("requires", "w")
41 reqfile = self.opener("requires", "w")
42 for r in requirements:
42 for r in requirements:
43 reqfile.write("%s\n" % r)
43 reqfile.write("%s\n" % r)
44 reqfile.close()
44 reqfile.close()
45 else:
45 else:
46 raise repo.RepoError(_("repository %s not found") % path)
46 raise repo.RepoError(_("repository %s not found") % path)
47 elif create:
47 elif create:
48 raise repo.RepoError(_("repository %s already exists") % path)
48 raise repo.RepoError(_("repository %s already exists") % path)
49 else:
49 else:
50 # find requirements
50 # find requirements
51 try:
51 try:
52 requirements = self.opener("requires").read().splitlines()
52 requirements = self.opener("requires").read().splitlines()
53 except IOError, inst:
53 except IOError, inst:
54 if inst.errno != errno.ENOENT:
54 if inst.errno != errno.ENOENT:
55 raise
55 raise
56 requirements = []
56 requirements = []
57 # check them
57 # check them
58 for r in requirements:
58 for r in requirements:
59 if r not in self.supported:
59 if r not in self.supported:
60 raise repo.RepoError(_("requirement '%s' not supported") % r)
60 raise repo.RepoError(_("requirement '%s' not supported") % r)
61
61
62 # setup store
62 # setup store
63 if "store" in requirements:
63 if "store" in requirements:
64 self.encodefn = util.encodefilename
64 self.encodefn = util.encodefilename
65 self.decodefn = util.decodefilename
65 self.decodefn = util.decodefilename
66 self.spath = os.path.join(self.path, "store")
66 self.spath = os.path.join(self.path, "store")
67 else:
67 else:
68 self.encodefn = lambda x: x
68 self.encodefn = lambda x: x
69 self.decodefn = lambda x: x
69 self.decodefn = lambda x: x
70 self.spath = self.path
70 self.spath = self.path
71
71
72 try:
72 try:
73 # files in .hg/ will be created using this mode
73 # files in .hg/ will be created using this mode
74 mode = os.stat(self.spath).st_mode
74 mode = os.stat(self.spath).st_mode
75 # avoid some useless chmods
75 # avoid some useless chmods
76 if (0777 & ~util._umask) == (0777 & mode):
76 if (0777 & ~util._umask) == (0777 & mode):
77 mode = None
77 mode = None
78 except OSError:
78 except OSError:
79 mode = None
79 mode = None
80
80
81 self._createmode = mode
81 self._createmode = mode
82 self.opener.createmode = mode
82 self.opener.createmode = mode
83 sopener = util.opener(self.spath)
83 sopener = util.opener(self.spath)
84 sopener.createmode = mode
84 sopener.createmode = mode
85 self.sopener = util.encodedopener(sopener, self.encodefn)
85 self.sopener = util.encodedopener(sopener, self.encodefn)
86
86
87 self.ui = ui.ui(parentui=parentui)
87 self.ui = ui.ui(parentui=parentui)
88 try:
88 try:
89 self.ui.readconfig(self.join("hgrc"), self.root)
89 self.ui.readconfig(self.join("hgrc"), self.root)
90 extensions.loadall(self.ui)
90 extensions.loadall(self.ui)
91 except IOError:
91 except IOError:
92 pass
92 pass
93
93
94 self.tagscache = None
94 self.tagscache = None
95 self._tagstypecache = None
95 self._tagstypecache = None
96 self.branchcache = None
96 self.branchcache = None
97 self._ubranchcache = None # UTF-8 version of branchcache
98 self._branchcachetip = None
97 self.nodetagscache = None
99 self.nodetagscache = None
98 self.filterpats = {}
100 self.filterpats = {}
99 self._datafilters = {}
101 self._datafilters = {}
100 self._transref = self._lockref = self._wlockref = None
102 self._transref = self._lockref = self._wlockref = None
101
103
102 def __getattr__(self, name):
104 def __getattr__(self, name):
103 if name == 'changelog':
105 if name == 'changelog':
104 self.changelog = changelog.changelog(self.sopener)
106 self.changelog = changelog.changelog(self.sopener)
105 self.sopener.defversion = self.changelog.version
107 self.sopener.defversion = self.changelog.version
106 return self.changelog
108 return self.changelog
107 if name == 'manifest':
109 if name == 'manifest':
108 self.changelog
110 self.changelog
109 self.manifest = manifest.manifest(self.sopener)
111 self.manifest = manifest.manifest(self.sopener)
110 return self.manifest
112 return self.manifest
111 if name == 'dirstate':
113 if name == 'dirstate':
112 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
114 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
113 return self.dirstate
115 return self.dirstate
114 else:
116 else:
115 raise AttributeError, name
117 raise AttributeError, name
116
118
117 def url(self):
119 def url(self):
118 return 'file:' + self.root
120 return 'file:' + self.root
119
121
120 def hook(self, name, throw=False, **args):
122 def hook(self, name, throw=False, **args):
121 return hook.hook(self.ui, self, name, throw, **args)
123 return hook.hook(self.ui, self, name, throw, **args)
122
124
123 tag_disallowed = ':\r\n'
125 tag_disallowed = ':\r\n'
124
126
125 def _tag(self, name, node, message, local, user, date, parent=None,
127 def _tag(self, name, node, message, local, user, date, parent=None,
126 extra={}):
128 extra={}):
127 use_dirstate = parent is None
129 use_dirstate = parent is None
128
130
129 for c in self.tag_disallowed:
131 for c in self.tag_disallowed:
130 if c in name:
132 if c in name:
131 raise util.Abort(_('%r cannot be used in a tag name') % c)
133 raise util.Abort(_('%r cannot be used in a tag name') % c)
132
134
133 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
135 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
134
136
135 def writetag(fp, name, munge, prevtags):
137 def writetag(fp, name, munge, prevtags):
136 fp.seek(0, 2)
138 fp.seek(0, 2)
137 if prevtags and prevtags[-1] != '\n':
139 if prevtags and prevtags[-1] != '\n':
138 fp.write('\n')
140 fp.write('\n')
139 fp.write('%s %s\n' % (hex(node), munge and munge(name) or name))
141 fp.write('%s %s\n' % (hex(node), munge and munge(name) or name))
140 fp.close()
142 fp.close()
141
143
142 prevtags = ''
144 prevtags = ''
143 if local:
145 if local:
144 try:
146 try:
145 fp = self.opener('localtags', 'r+')
147 fp = self.opener('localtags', 'r+')
146 except IOError, err:
148 except IOError, err:
147 fp = self.opener('localtags', 'a')
149 fp = self.opener('localtags', 'a')
148 else:
150 else:
149 prevtags = fp.read()
151 prevtags = fp.read()
150
152
151 # local tags are stored in the current charset
153 # local tags are stored in the current charset
152 writetag(fp, name, None, prevtags)
154 writetag(fp, name, None, prevtags)
153 self.hook('tag', node=hex(node), tag=name, local=local)
155 self.hook('tag', node=hex(node), tag=name, local=local)
154 return
156 return
155
157
156 if use_dirstate:
158 if use_dirstate:
157 try:
159 try:
158 fp = self.wfile('.hgtags', 'rb+')
160 fp = self.wfile('.hgtags', 'rb+')
159 except IOError, err:
161 except IOError, err:
160 fp = self.wfile('.hgtags', 'ab')
162 fp = self.wfile('.hgtags', 'ab')
161 else:
163 else:
162 prevtags = fp.read()
164 prevtags = fp.read()
163 else:
165 else:
164 try:
166 try:
165 prevtags = self.filectx('.hgtags', parent).data()
167 prevtags = self.filectx('.hgtags', parent).data()
166 except revlog.LookupError:
168 except revlog.LookupError:
167 pass
169 pass
168 fp = self.wfile('.hgtags', 'wb')
170 fp = self.wfile('.hgtags', 'wb')
169 if prevtags:
171 if prevtags:
170 fp.write(prevtags)
172 fp.write(prevtags)
171
173
172 # committed tags are stored in UTF-8
174 # committed tags are stored in UTF-8
173 writetag(fp, name, util.fromlocal, prevtags)
175 writetag(fp, name, util.fromlocal, prevtags)
174
176
175 if use_dirstate and '.hgtags' not in self.dirstate:
177 if use_dirstate and '.hgtags' not in self.dirstate:
176 self.add(['.hgtags'])
178 self.add(['.hgtags'])
177
179
178 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
180 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
179 extra=extra)
181 extra=extra)
180
182
181 self.hook('tag', node=hex(node), tag=name, local=local)
183 self.hook('tag', node=hex(node), tag=name, local=local)
182
184
183 return tagnode
185 return tagnode
184
186
185 def tag(self, name, node, message, local, user, date):
187 def tag(self, name, node, message, local, user, date):
186 '''tag a revision with a symbolic name.
188 '''tag a revision with a symbolic name.
187
189
188 if local is True, the tag is stored in a per-repository file.
190 if local is True, the tag is stored in a per-repository file.
189 otherwise, it is stored in the .hgtags file, and a new
191 otherwise, it is stored in the .hgtags file, and a new
190 changeset is committed with the change.
192 changeset is committed with the change.
191
193
192 keyword arguments:
194 keyword arguments:
193
195
194 local: whether to store tag in non-version-controlled file
196 local: whether to store tag in non-version-controlled file
195 (default False)
197 (default False)
196
198
197 message: commit message to use if committing
199 message: commit message to use if committing
198
200
199 user: name of user to use if committing
201 user: name of user to use if committing
200
202
201 date: date tuple to use if committing'''
203 date: date tuple to use if committing'''
202
204
203 for x in self.status()[:5]:
205 for x in self.status()[:5]:
204 if '.hgtags' in x:
206 if '.hgtags' in x:
205 raise util.Abort(_('working copy of .hgtags is changed '
207 raise util.Abort(_('working copy of .hgtags is changed '
206 '(please commit .hgtags manually)'))
208 '(please commit .hgtags manually)'))
207
209
208
210
209 self._tag(name, node, message, local, user, date)
211 self._tag(name, node, message, local, user, date)
210
212
211 def tags(self):
213 def tags(self):
212 '''return a mapping of tag to node'''
214 '''return a mapping of tag to node'''
213 if self.tagscache:
215 if self.tagscache:
214 return self.tagscache
216 return self.tagscache
215
217
216 globaltags = {}
218 globaltags = {}
217 tagtypes = {}
219 tagtypes = {}
218
220
219 def readtags(lines, fn, tagtype):
221 def readtags(lines, fn, tagtype):
220 filetags = {}
222 filetags = {}
221 count = 0
223 count = 0
222
224
223 def warn(msg):
225 def warn(msg):
224 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
226 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
225
227
226 for l in lines:
228 for l in lines:
227 count += 1
229 count += 1
228 if not l:
230 if not l:
229 continue
231 continue
230 s = l.split(" ", 1)
232 s = l.split(" ", 1)
231 if len(s) != 2:
233 if len(s) != 2:
232 warn(_("cannot parse entry"))
234 warn(_("cannot parse entry"))
233 continue
235 continue
234 node, key = s
236 node, key = s
235 key = util.tolocal(key.strip()) # stored in UTF-8
237 key = util.tolocal(key.strip()) # stored in UTF-8
236 try:
238 try:
237 bin_n = bin(node)
239 bin_n = bin(node)
238 except TypeError:
240 except TypeError:
239 warn(_("node '%s' is not well formed") % node)
241 warn(_("node '%s' is not well formed") % node)
240 continue
242 continue
241 if bin_n not in self.changelog.nodemap:
243 if bin_n not in self.changelog.nodemap:
242 warn(_("tag '%s' refers to unknown node") % key)
244 warn(_("tag '%s' refers to unknown node") % key)
243 continue
245 continue
244
246
245 h = []
247 h = []
246 if key in filetags:
248 if key in filetags:
247 n, h = filetags[key]
249 n, h = filetags[key]
248 h.append(n)
250 h.append(n)
249 filetags[key] = (bin_n, h)
251 filetags[key] = (bin_n, h)
250
252
251 for k, nh in filetags.items():
253 for k, nh in filetags.items():
252 if k not in globaltags:
254 if k not in globaltags:
253 globaltags[k] = nh
255 globaltags[k] = nh
254 tagtypes[k] = tagtype
256 tagtypes[k] = tagtype
255 continue
257 continue
256
258
257 # we prefer the global tag if:
259 # we prefer the global tag if:
258 # it supercedes us OR
260 # it supercedes us OR
259 # mutual supercedes and it has a higher rank
261 # mutual supercedes and it has a higher rank
260 # otherwise we win because we're tip-most
262 # otherwise we win because we're tip-most
261 an, ah = nh
263 an, ah = nh
262 bn, bh = globaltags[k]
264 bn, bh = globaltags[k]
263 if (bn != an and an in bh and
265 if (bn != an and an in bh and
264 (bn not in ah or len(bh) > len(ah))):
266 (bn not in ah or len(bh) > len(ah))):
265 an = bn
267 an = bn
266 ah.extend([n for n in bh if n not in ah])
268 ah.extend([n for n in bh if n not in ah])
267 globaltags[k] = an, ah
269 globaltags[k] = an, ah
268 tagtypes[k] = tagtype
270 tagtypes[k] = tagtype
269
271
270 # read the tags file from each head, ending with the tip
272 # read the tags file from each head, ending with the tip
271 f = None
273 f = None
272 for rev, node, fnode in self._hgtagsnodes():
274 for rev, node, fnode in self._hgtagsnodes():
273 f = (f and f.filectx(fnode) or
275 f = (f and f.filectx(fnode) or
274 self.filectx('.hgtags', fileid=fnode))
276 self.filectx('.hgtags', fileid=fnode))
275 readtags(f.data().splitlines(), f, "global")
277 readtags(f.data().splitlines(), f, "global")
276
278
277 try:
279 try:
278 data = util.fromlocal(self.opener("localtags").read())
280 data = util.fromlocal(self.opener("localtags").read())
279 # localtags are stored in the local character set
281 # localtags are stored in the local character set
280 # while the internal tag table is stored in UTF-8
282 # while the internal tag table is stored in UTF-8
281 readtags(data.splitlines(), "localtags", "local")
283 readtags(data.splitlines(), "localtags", "local")
282 except IOError:
284 except IOError:
283 pass
285 pass
284
286
285 self.tagscache = {}
287 self.tagscache = {}
286 self._tagstypecache = {}
288 self._tagstypecache = {}
287 for k,nh in globaltags.items():
289 for k,nh in globaltags.items():
288 n = nh[0]
290 n = nh[0]
289 if n != nullid:
291 if n != nullid:
290 self.tagscache[k] = n
292 self.tagscache[k] = n
291 self._tagstypecache[k] = tagtypes[k]
293 self._tagstypecache[k] = tagtypes[k]
292 self.tagscache['tip'] = self.changelog.tip()
294 self.tagscache['tip'] = self.changelog.tip()
293
295
294 return self.tagscache
296 return self.tagscache
295
297
296 def tagtype(self, tagname):
298 def tagtype(self, tagname):
297 '''
299 '''
298 return the type of the given tag. result can be:
300 return the type of the given tag. result can be:
299
301
300 'local' : a local tag
302 'local' : a local tag
301 'global' : a global tag
303 'global' : a global tag
302 None : tag does not exist
304 None : tag does not exist
303 '''
305 '''
304
306
305 self.tags()
307 self.tags()
306
308
307 return self._tagstypecache.get(tagname)
309 return self._tagstypecache.get(tagname)
308
310
309 def _hgtagsnodes(self):
311 def _hgtagsnodes(self):
310 heads = self.heads()
312 heads = self.heads()
311 heads.reverse()
313 heads.reverse()
312 last = {}
314 last = {}
313 ret = []
315 ret = []
314 for node in heads:
316 for node in heads:
315 c = self.changectx(node)
317 c = self.changectx(node)
316 rev = c.rev()
318 rev = c.rev()
317 try:
319 try:
318 fnode = c.filenode('.hgtags')
320 fnode = c.filenode('.hgtags')
319 except revlog.LookupError:
321 except revlog.LookupError:
320 continue
322 continue
321 ret.append((rev, node, fnode))
323 ret.append((rev, node, fnode))
322 if fnode in last:
324 if fnode in last:
323 ret[last[fnode]] = None
325 ret[last[fnode]] = None
324 last[fnode] = len(ret) - 1
326 last[fnode] = len(ret) - 1
325 return [item for item in ret if item]
327 return [item for item in ret if item]
326
328
327 def tagslist(self):
329 def tagslist(self):
328 '''return a list of tags ordered by revision'''
330 '''return a list of tags ordered by revision'''
329 l = []
331 l = []
330 for t, n in self.tags().items():
332 for t, n in self.tags().items():
331 try:
333 try:
332 r = self.changelog.rev(n)
334 r = self.changelog.rev(n)
333 except:
335 except:
334 r = -2 # sort to the beginning of the list if unknown
336 r = -2 # sort to the beginning of the list if unknown
335 l.append((r, t, n))
337 l.append((r, t, n))
336 l.sort()
338 l.sort()
337 return [(t, n) for r, t, n in l]
339 return [(t, n) for r, t, n in l]
338
340
339 def nodetags(self, node):
341 def nodetags(self, node):
340 '''return the tags associated with a node'''
342 '''return the tags associated with a node'''
341 if not self.nodetagscache:
343 if not self.nodetagscache:
342 self.nodetagscache = {}
344 self.nodetagscache = {}
343 for t, n in self.tags().items():
345 for t, n in self.tags().items():
344 self.nodetagscache.setdefault(n, []).append(t)
346 self.nodetagscache.setdefault(n, []).append(t)
345 return self.nodetagscache.get(node, [])
347 return self.nodetagscache.get(node, [])
346
348
347 def _branchtags(self):
349 def _branchtags(self, partial, lrev):
348 partial, last, lrev = self._readbranchcache()
349
350 tiprev = self.changelog.count() - 1
350 tiprev = self.changelog.count() - 1
351 if lrev != tiprev:
351 if lrev != tiprev:
352 self._updatebranchcache(partial, lrev+1, tiprev+1)
352 self._updatebranchcache(partial, lrev+1, tiprev+1)
353 self._writebranchcache(partial, self.changelog.tip(), tiprev)
353 self._writebranchcache(partial, self.changelog.tip(), tiprev)
354
354
355 return partial
355 return partial
356
356
357 def branchtags(self):
357 def branchtags(self):
358 if self.branchcache is not None:
358 tip = self.changelog.tip()
359 if self.branchcache is not None and self._branchcachetip == tip:
359 return self.branchcache
360 return self.branchcache
360
361
362 oldtip = self._branchcachetip
363 self._branchcachetip = tip
364 if self.branchcache is None:
361 self.branchcache = {} # avoid recursion in changectx
365 self.branchcache = {} # avoid recursion in changectx
362 partial = self._branchtags()
366 else:
367 self.branchcache.clear() # keep using the same dict
368 if oldtip is None or oldtip not in self.changelog.nodemap:
369 partial, last, lrev = self._readbranchcache()
370 else:
371 lrev = self.changelog.rev(oldtip)
372 partial = self._ubranchcache
373
374 self._branchtags(partial, lrev)
363
375
364 # the branch cache is stored on disk as UTF-8, but in the local
376 # the branch cache is stored on disk as UTF-8, but in the local
365 # charset internally
377 # charset internally
366 for k, v in partial.items():
378 for k, v in partial.items():
367 self.branchcache[util.tolocal(k)] = v
379 self.branchcache[util.tolocal(k)] = v
380 self._ubranchcache = partial
368 return self.branchcache
381 return self.branchcache
369
382
370 def _readbranchcache(self):
383 def _readbranchcache(self):
371 partial = {}
384 partial = {}
372 try:
385 try:
373 f = self.opener("branch.cache")
386 f = self.opener("branch.cache")
374 lines = f.read().split('\n')
387 lines = f.read().split('\n')
375 f.close()
388 f.close()
376 except (IOError, OSError):
389 except (IOError, OSError):
377 return {}, nullid, nullrev
390 return {}, nullid, nullrev
378
391
379 try:
392 try:
380 last, lrev = lines.pop(0).split(" ", 1)
393 last, lrev = lines.pop(0).split(" ", 1)
381 last, lrev = bin(last), int(lrev)
394 last, lrev = bin(last), int(lrev)
382 if not (lrev < self.changelog.count() and
395 if not (lrev < self.changelog.count() and
383 self.changelog.node(lrev) == last): # sanity check
396 self.changelog.node(lrev) == last): # sanity check
384 # invalidate the cache
397 # invalidate the cache
385 raise ValueError('invalidating branch cache (tip differs)')
398 raise ValueError('invalidating branch cache (tip differs)')
386 for l in lines:
399 for l in lines:
387 if not l: continue
400 if not l: continue
388 node, label = l.split(" ", 1)
401 node, label = l.split(" ", 1)
389 partial[label.strip()] = bin(node)
402 partial[label.strip()] = bin(node)
390 except (KeyboardInterrupt, util.SignalInterrupt):
403 except (KeyboardInterrupt, util.SignalInterrupt):
391 raise
404 raise
392 except Exception, inst:
405 except Exception, inst:
393 if self.ui.debugflag:
406 if self.ui.debugflag:
394 self.ui.warn(str(inst), '\n')
407 self.ui.warn(str(inst), '\n')
395 partial, last, lrev = {}, nullid, nullrev
408 partial, last, lrev = {}, nullid, nullrev
396 return partial, last, lrev
409 return partial, last, lrev
397
410
398 def _writebranchcache(self, branches, tip, tiprev):
411 def _writebranchcache(self, branches, tip, tiprev):
399 try:
412 try:
400 f = self.opener("branch.cache", "w", atomictemp=True)
413 f = self.opener("branch.cache", "w", atomictemp=True)
401 f.write("%s %s\n" % (hex(tip), tiprev))
414 f.write("%s %s\n" % (hex(tip), tiprev))
402 for label, node in branches.iteritems():
415 for label, node in branches.iteritems():
403 f.write("%s %s\n" % (hex(node), label))
416 f.write("%s %s\n" % (hex(node), label))
404 f.rename()
417 f.rename()
405 except (IOError, OSError):
418 except (IOError, OSError):
406 pass
419 pass
407
420
408 def _updatebranchcache(self, partial, start, end):
421 def _updatebranchcache(self, partial, start, end):
409 for r in xrange(start, end):
422 for r in xrange(start, end):
410 c = self.changectx(r)
423 c = self.changectx(r)
411 b = c.branch()
424 b = c.branch()
412 partial[b] = c.node()
425 partial[b] = c.node()
413
426
414 def lookup(self, key):
427 def lookup(self, key):
415 if key == '.':
428 if key == '.':
416 key, second = self.dirstate.parents()
429 key, second = self.dirstate.parents()
417 if key == nullid:
430 if key == nullid:
418 raise repo.RepoError(_("no revision checked out"))
431 raise repo.RepoError(_("no revision checked out"))
419 if second != nullid:
432 if second != nullid:
420 self.ui.warn(_("warning: working directory has two parents, "
433 self.ui.warn(_("warning: working directory has two parents, "
421 "tag '.' uses the first\n"))
434 "tag '.' uses the first\n"))
422 elif key == 'null':
435 elif key == 'null':
423 return nullid
436 return nullid
424 n = self.changelog._match(key)
437 n = self.changelog._match(key)
425 if n:
438 if n:
426 return n
439 return n
427 if key in self.tags():
440 if key in self.tags():
428 return self.tags()[key]
441 return self.tags()[key]
429 if key in self.branchtags():
442 if key in self.branchtags():
430 return self.branchtags()[key]
443 return self.branchtags()[key]
431 n = self.changelog._partialmatch(key)
444 n = self.changelog._partialmatch(key)
432 if n:
445 if n:
433 return n
446 return n
434 try:
447 try:
435 if len(key) == 20:
448 if len(key) == 20:
436 key = hex(key)
449 key = hex(key)
437 except:
450 except:
438 pass
451 pass
439 raise repo.RepoError(_("unknown revision '%s'") % key)
452 raise repo.RepoError(_("unknown revision '%s'") % key)
440
453
441 def dev(self):
454 def dev(self):
442 return os.lstat(self.path).st_dev
455 return os.lstat(self.path).st_dev
443
456
444 def local(self):
457 def local(self):
445 return True
458 return True
446
459
447 def join(self, f):
460 def join(self, f):
448 return os.path.join(self.path, f)
461 return os.path.join(self.path, f)
449
462
450 def sjoin(self, f):
463 def sjoin(self, f):
451 f = self.encodefn(f)
464 f = self.encodefn(f)
452 return os.path.join(self.spath, f)
465 return os.path.join(self.spath, f)
453
466
454 def wjoin(self, f):
467 def wjoin(self, f):
455 return os.path.join(self.root, f)
468 return os.path.join(self.root, f)
456
469
457 def file(self, f):
470 def file(self, f):
458 if f[0] == '/':
471 if f[0] == '/':
459 f = f[1:]
472 f = f[1:]
460 return filelog.filelog(self.sopener, f)
473 return filelog.filelog(self.sopener, f)
461
474
462 def changectx(self, changeid=None):
475 def changectx(self, changeid=None):
463 return context.changectx(self, changeid)
476 return context.changectx(self, changeid)
464
477
465 def workingctx(self):
478 def workingctx(self):
466 return context.workingctx(self)
479 return context.workingctx(self)
467
480
468 def parents(self, changeid=None):
481 def parents(self, changeid=None):
469 '''
482 '''
470 get list of changectxs for parents of changeid or working directory
483 get list of changectxs for parents of changeid or working directory
471 '''
484 '''
472 if changeid is None:
485 if changeid is None:
473 pl = self.dirstate.parents()
486 pl = self.dirstate.parents()
474 else:
487 else:
475 n = self.changelog.lookup(changeid)
488 n = self.changelog.lookup(changeid)
476 pl = self.changelog.parents(n)
489 pl = self.changelog.parents(n)
477 if pl[1] == nullid:
490 if pl[1] == nullid:
478 return [self.changectx(pl[0])]
491 return [self.changectx(pl[0])]
479 return [self.changectx(pl[0]), self.changectx(pl[1])]
492 return [self.changectx(pl[0]), self.changectx(pl[1])]
480
493
481 def filectx(self, path, changeid=None, fileid=None):
494 def filectx(self, path, changeid=None, fileid=None):
482 """changeid can be a changeset revision, node, or tag.
495 """changeid can be a changeset revision, node, or tag.
483 fileid can be a file revision or node."""
496 fileid can be a file revision or node."""
484 return context.filectx(self, path, changeid, fileid)
497 return context.filectx(self, path, changeid, fileid)
485
498
486 def getcwd(self):
499 def getcwd(self):
487 return self.dirstate.getcwd()
500 return self.dirstate.getcwd()
488
501
489 def pathto(self, f, cwd=None):
502 def pathto(self, f, cwd=None):
490 return self.dirstate.pathto(f, cwd)
503 return self.dirstate.pathto(f, cwd)
491
504
492 def wfile(self, f, mode='r'):
505 def wfile(self, f, mode='r'):
493 return self.wopener(f, mode)
506 return self.wopener(f, mode)
494
507
495 def _link(self, f):
508 def _link(self, f):
496 return os.path.islink(self.wjoin(f))
509 return os.path.islink(self.wjoin(f))
497
510
498 def _filter(self, filter, filename, data):
511 def _filter(self, filter, filename, data):
499 if filter not in self.filterpats:
512 if filter not in self.filterpats:
500 l = []
513 l = []
501 for pat, cmd in self.ui.configitems(filter):
514 for pat, cmd in self.ui.configitems(filter):
502 mf = util.matcher(self.root, "", [pat], [], [])[1]
515 mf = util.matcher(self.root, "", [pat], [], [])[1]
503 fn = None
516 fn = None
504 params = cmd
517 params = cmd
505 for name, filterfn in self._datafilters.iteritems():
518 for name, filterfn in self._datafilters.iteritems():
506 if cmd.startswith(name):
519 if cmd.startswith(name):
507 fn = filterfn
520 fn = filterfn
508 params = cmd[len(name):].lstrip()
521 params = cmd[len(name):].lstrip()
509 break
522 break
510 if not fn:
523 if not fn:
511 fn = lambda s, c, **kwargs: util.filter(s, c)
524 fn = lambda s, c, **kwargs: util.filter(s, c)
512 # Wrap old filters not supporting keyword arguments
525 # Wrap old filters not supporting keyword arguments
513 if not inspect.getargspec(fn)[2]:
526 if not inspect.getargspec(fn)[2]:
514 oldfn = fn
527 oldfn = fn
515 fn = lambda s, c, **kwargs: oldfn(s, c)
528 fn = lambda s, c, **kwargs: oldfn(s, c)
516 l.append((mf, fn, params))
529 l.append((mf, fn, params))
517 self.filterpats[filter] = l
530 self.filterpats[filter] = l
518
531
519 for mf, fn, cmd in self.filterpats[filter]:
532 for mf, fn, cmd in self.filterpats[filter]:
520 if mf(filename):
533 if mf(filename):
521 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
534 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
522 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
535 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
523 break
536 break
524
537
525 return data
538 return data
526
539
527 def adddatafilter(self, name, filter):
540 def adddatafilter(self, name, filter):
528 self._datafilters[name] = filter
541 self._datafilters[name] = filter
529
542
530 def wread(self, filename):
543 def wread(self, filename):
531 if self._link(filename):
544 if self._link(filename):
532 data = os.readlink(self.wjoin(filename))
545 data = os.readlink(self.wjoin(filename))
533 else:
546 else:
534 data = self.wopener(filename, 'r').read()
547 data = self.wopener(filename, 'r').read()
535 return self._filter("encode", filename, data)
548 return self._filter("encode", filename, data)
536
549
537 def wwrite(self, filename, data, flags):
550 def wwrite(self, filename, data, flags):
538 data = self._filter("decode", filename, data)
551 data = self._filter("decode", filename, data)
539 try:
552 try:
540 os.unlink(self.wjoin(filename))
553 os.unlink(self.wjoin(filename))
541 except OSError:
554 except OSError:
542 pass
555 pass
543 self.wopener(filename, 'w').write(data)
556 self.wopener(filename, 'w').write(data)
544 util.set_flags(self.wjoin(filename), flags)
557 util.set_flags(self.wjoin(filename), flags)
545
558
546 def wwritedata(self, filename, data):
559 def wwritedata(self, filename, data):
547 return self._filter("decode", filename, data)
560 return self._filter("decode", filename, data)
548
561
549 def transaction(self):
562 def transaction(self):
550 if self._transref and self._transref():
563 if self._transref and self._transref():
551 return self._transref().nest()
564 return self._transref().nest()
552
565
553 # abort here if the journal already exists
566 # abort here if the journal already exists
554 if os.path.exists(self.sjoin("journal")):
567 if os.path.exists(self.sjoin("journal")):
555 raise repo.RepoError(_("journal already exists - run hg recover"))
568 raise repo.RepoError(_("journal already exists - run hg recover"))
556
569
557 # save dirstate for rollback
570 # save dirstate for rollback
558 try:
571 try:
559 ds = self.opener("dirstate").read()
572 ds = self.opener("dirstate").read()
560 except IOError:
573 except IOError:
561 ds = ""
574 ds = ""
562 self.opener("journal.dirstate", "w").write(ds)
575 self.opener("journal.dirstate", "w").write(ds)
563 self.opener("journal.branch", "w").write(self.dirstate.branch())
576 self.opener("journal.branch", "w").write(self.dirstate.branch())
564
577
565 renames = [(self.sjoin("journal"), self.sjoin("undo")),
578 renames = [(self.sjoin("journal"), self.sjoin("undo")),
566 (self.join("journal.dirstate"), self.join("undo.dirstate")),
579 (self.join("journal.dirstate"), self.join("undo.dirstate")),
567 (self.join("journal.branch"), self.join("undo.branch"))]
580 (self.join("journal.branch"), self.join("undo.branch"))]
568 tr = transaction.transaction(self.ui.warn, self.sopener,
581 tr = transaction.transaction(self.ui.warn, self.sopener,
569 self.sjoin("journal"),
582 self.sjoin("journal"),
570 aftertrans(renames),
583 aftertrans(renames),
571 self._createmode)
584 self._createmode)
572 self._transref = weakref.ref(tr)
585 self._transref = weakref.ref(tr)
573 return tr
586 return tr
574
587
575 def recover(self):
588 def recover(self):
576 l = self.lock()
589 l = self.lock()
577 try:
590 try:
578 if os.path.exists(self.sjoin("journal")):
591 if os.path.exists(self.sjoin("journal")):
579 self.ui.status(_("rolling back interrupted transaction\n"))
592 self.ui.status(_("rolling back interrupted transaction\n"))
580 transaction.rollback(self.sopener, self.sjoin("journal"))
593 transaction.rollback(self.sopener, self.sjoin("journal"))
581 self.invalidate()
594 self.invalidate()
582 return True
595 return True
583 else:
596 else:
584 self.ui.warn(_("no interrupted transaction available\n"))
597 self.ui.warn(_("no interrupted transaction available\n"))
585 return False
598 return False
586 finally:
599 finally:
587 del l
600 del l
588
601
589 def rollback(self):
602 def rollback(self):
590 wlock = lock = None
603 wlock = lock = None
591 try:
604 try:
592 wlock = self.wlock()
605 wlock = self.wlock()
593 lock = self.lock()
606 lock = self.lock()
594 if os.path.exists(self.sjoin("undo")):
607 if os.path.exists(self.sjoin("undo")):
595 self.ui.status(_("rolling back last transaction\n"))
608 self.ui.status(_("rolling back last transaction\n"))
596 transaction.rollback(self.sopener, self.sjoin("undo"))
609 transaction.rollback(self.sopener, self.sjoin("undo"))
597 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
610 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
598 try:
611 try:
599 branch = self.opener("undo.branch").read()
612 branch = self.opener("undo.branch").read()
600 self.dirstate.setbranch(branch)
613 self.dirstate.setbranch(branch)
601 except IOError:
614 except IOError:
602 self.ui.warn(_("Named branch could not be reset, "
615 self.ui.warn(_("Named branch could not be reset, "
603 "current branch still is: %s\n")
616 "current branch still is: %s\n")
604 % util.tolocal(self.dirstate.branch()))
617 % util.tolocal(self.dirstate.branch()))
605 self.invalidate()
618 self.invalidate()
606 self.dirstate.invalidate()
619 self.dirstate.invalidate()
607 else:
620 else:
608 self.ui.warn(_("no rollback information available\n"))
621 self.ui.warn(_("no rollback information available\n"))
609 finally:
622 finally:
610 del lock, wlock
623 del lock, wlock
611
624
612 def invalidate(self):
625 def invalidate(self):
613 for a in "changelog manifest".split():
626 for a in "changelog manifest".split():
614 if hasattr(self, a):
627 if hasattr(self, a):
615 self.__delattr__(a)
628 self.__delattr__(a)
616 self.tagscache = None
629 self.tagscache = None
617 self._tagstypecache = None
630 self._tagstypecache = None
618 self.nodetagscache = None
631 self.nodetagscache = None
632 self.branchcache = None
633 self._ubranchcache = None
634 self._branchcachetip = None
619
635
620 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
636 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
621 try:
637 try:
622 l = lock.lock(lockname, 0, releasefn, desc=desc)
638 l = lock.lock(lockname, 0, releasefn, desc=desc)
623 except lock.LockHeld, inst:
639 except lock.LockHeld, inst:
624 if not wait:
640 if not wait:
625 raise
641 raise
626 self.ui.warn(_("waiting for lock on %s held by %r\n") %
642 self.ui.warn(_("waiting for lock on %s held by %r\n") %
627 (desc, inst.locker))
643 (desc, inst.locker))
628 # default to 600 seconds timeout
644 # default to 600 seconds timeout
629 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
645 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
630 releasefn, desc=desc)
646 releasefn, desc=desc)
631 if acquirefn:
647 if acquirefn:
632 acquirefn()
648 acquirefn()
633 return l
649 return l
634
650
635 def lock(self, wait=True):
651 def lock(self, wait=True):
636 if self._lockref and self._lockref():
652 if self._lockref and self._lockref():
637 return self._lockref()
653 return self._lockref()
638
654
639 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
655 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
640 _('repository %s') % self.origroot)
656 _('repository %s') % self.origroot)
641 self._lockref = weakref.ref(l)
657 self._lockref = weakref.ref(l)
642 return l
658 return l
643
659
644 def wlock(self, wait=True):
660 def wlock(self, wait=True):
645 if self._wlockref and self._wlockref():
661 if self._wlockref and self._wlockref():
646 return self._wlockref()
662 return self._wlockref()
647
663
648 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
664 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
649 self.dirstate.invalidate, _('working directory of %s') %
665 self.dirstate.invalidate, _('working directory of %s') %
650 self.origroot)
666 self.origroot)
651 self._wlockref = weakref.ref(l)
667 self._wlockref = weakref.ref(l)
652 return l
668 return l
653
669
654 def filecommit(self, fn, manifest1, manifest2, linkrev, tr, changelist):
670 def filecommit(self, fn, manifest1, manifest2, linkrev, tr, changelist):
655 """
671 """
656 commit an individual file as part of a larger transaction
672 commit an individual file as part of a larger transaction
657 """
673 """
658
674
659 t = self.wread(fn)
675 t = self.wread(fn)
660 fl = self.file(fn)
676 fl = self.file(fn)
661 fp1 = manifest1.get(fn, nullid)
677 fp1 = manifest1.get(fn, nullid)
662 fp2 = manifest2.get(fn, nullid)
678 fp2 = manifest2.get(fn, nullid)
663
679
664 meta = {}
680 meta = {}
665 cp = self.dirstate.copied(fn)
681 cp = self.dirstate.copied(fn)
666 if cp:
682 if cp:
667 # Mark the new revision of this file as a copy of another
683 # Mark the new revision of this file as a copy of another
668 # file. This copy data will effectively act as a parent
684 # file. This copy data will effectively act as a parent
669 # of this new revision. If this is a merge, the first
685 # of this new revision. If this is a merge, the first
670 # parent will be the nullid (meaning "look up the copy data")
686 # parent will be the nullid (meaning "look up the copy data")
671 # and the second one will be the other parent. For example:
687 # and the second one will be the other parent. For example:
672 #
688 #
673 # 0 --- 1 --- 3 rev1 changes file foo
689 # 0 --- 1 --- 3 rev1 changes file foo
674 # \ / rev2 renames foo to bar and changes it
690 # \ / rev2 renames foo to bar and changes it
675 # \- 2 -/ rev3 should have bar with all changes and
691 # \- 2 -/ rev3 should have bar with all changes and
676 # should record that bar descends from
692 # should record that bar descends from
677 # bar in rev2 and foo in rev1
693 # bar in rev2 and foo in rev1
678 #
694 #
679 # this allows this merge to succeed:
695 # this allows this merge to succeed:
680 #
696 #
681 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
697 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
682 # \ / merging rev3 and rev4 should use bar@rev2
698 # \ / merging rev3 and rev4 should use bar@rev2
683 # \- 2 --- 4 as the merge base
699 # \- 2 --- 4 as the merge base
684 #
700 #
685 meta["copy"] = cp
701 meta["copy"] = cp
686 if not manifest2: # not a branch merge
702 if not manifest2: # not a branch merge
687 meta["copyrev"] = hex(manifest1.get(cp, nullid))
703 meta["copyrev"] = hex(manifest1.get(cp, nullid))
688 fp2 = nullid
704 fp2 = nullid
689 elif fp2 != nullid: # copied on remote side
705 elif fp2 != nullid: # copied on remote side
690 meta["copyrev"] = hex(manifest1.get(cp, nullid))
706 meta["copyrev"] = hex(manifest1.get(cp, nullid))
691 elif fp1 != nullid: # copied on local side, reversed
707 elif fp1 != nullid: # copied on local side, reversed
692 meta["copyrev"] = hex(manifest2.get(cp))
708 meta["copyrev"] = hex(manifest2.get(cp))
693 fp2 = fp1
709 fp2 = fp1
694 elif cp in manifest2: # directory rename on local side
710 elif cp in manifest2: # directory rename on local side
695 meta["copyrev"] = hex(manifest2[cp])
711 meta["copyrev"] = hex(manifest2[cp])
696 else: # directory rename on remote side
712 else: # directory rename on remote side
697 meta["copyrev"] = hex(manifest1.get(cp, nullid))
713 meta["copyrev"] = hex(manifest1.get(cp, nullid))
698 self.ui.debug(_(" %s: copy %s:%s\n") %
714 self.ui.debug(_(" %s: copy %s:%s\n") %
699 (fn, cp, meta["copyrev"]))
715 (fn, cp, meta["copyrev"]))
700 fp1 = nullid
716 fp1 = nullid
701 elif fp2 != nullid:
717 elif fp2 != nullid:
702 # is one parent an ancestor of the other?
718 # is one parent an ancestor of the other?
703 fpa = fl.ancestor(fp1, fp2)
719 fpa = fl.ancestor(fp1, fp2)
704 if fpa == fp1:
720 if fpa == fp1:
705 fp1, fp2 = fp2, nullid
721 fp1, fp2 = fp2, nullid
706 elif fpa == fp2:
722 elif fpa == fp2:
707 fp2 = nullid
723 fp2 = nullid
708
724
709 # is the file unmodified from the parent? report existing entry
725 # is the file unmodified from the parent? report existing entry
710 if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
726 if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
711 return fp1
727 return fp1
712
728
713 changelist.append(fn)
729 changelist.append(fn)
714 return fl.add(t, meta, tr, linkrev, fp1, fp2)
730 return fl.add(t, meta, tr, linkrev, fp1, fp2)
715
731
716 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
732 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
717 if p1 is None:
733 if p1 is None:
718 p1, p2 = self.dirstate.parents()
734 p1, p2 = self.dirstate.parents()
719 return self.commit(files=files, text=text, user=user, date=date,
735 return self.commit(files=files, text=text, user=user, date=date,
720 p1=p1, p2=p2, extra=extra, empty_ok=True)
736 p1=p1, p2=p2, extra=extra, empty_ok=True)
721
737
722 def commit(self, files=None, text="", user=None, date=None,
738 def commit(self, files=None, text="", user=None, date=None,
723 match=util.always, force=False, force_editor=False,
739 match=util.always, force=False, force_editor=False,
724 p1=None, p2=None, extra={}, empty_ok=False):
740 p1=None, p2=None, extra={}, empty_ok=False):
725 wlock = lock = tr = None
741 wlock = lock = tr = None
726 valid = 0 # don't save the dirstate if this isn't set
742 valid = 0 # don't save the dirstate if this isn't set
727 if files:
743 if files:
728 files = util.unique(files)
744 files = util.unique(files)
729 try:
745 try:
730 commit = []
746 commit = []
731 remove = []
747 remove = []
732 changed = []
748 changed = []
733 use_dirstate = (p1 is None) # not rawcommit
749 use_dirstate = (p1 is None) # not rawcommit
734 extra = extra.copy()
750 extra = extra.copy()
735
751
736 if use_dirstate:
752 if use_dirstate:
737 if files:
753 if files:
738 for f in files:
754 for f in files:
739 s = self.dirstate[f]
755 s = self.dirstate[f]
740 if s in 'nma':
756 if s in 'nma':
741 commit.append(f)
757 commit.append(f)
742 elif s == 'r':
758 elif s == 'r':
743 remove.append(f)
759 remove.append(f)
744 else:
760 else:
745 self.ui.warn(_("%s not tracked!\n") % f)
761 self.ui.warn(_("%s not tracked!\n") % f)
746 else:
762 else:
747 changes = self.status(match=match)[:5]
763 changes = self.status(match=match)[:5]
748 modified, added, removed, deleted, unknown = changes
764 modified, added, removed, deleted, unknown = changes
749 commit = modified + added
765 commit = modified + added
750 remove = removed
766 remove = removed
751 else:
767 else:
752 commit = files
768 commit = files
753
769
754 if use_dirstate:
770 if use_dirstate:
755 p1, p2 = self.dirstate.parents()
771 p1, p2 = self.dirstate.parents()
756 update_dirstate = True
772 update_dirstate = True
757 else:
773 else:
758 p1, p2 = p1, p2 or nullid
774 p1, p2 = p1, p2 or nullid
759 update_dirstate = (self.dirstate.parents()[0] == p1)
775 update_dirstate = (self.dirstate.parents()[0] == p1)
760
776
761 c1 = self.changelog.read(p1)
777 c1 = self.changelog.read(p1)
762 c2 = self.changelog.read(p2)
778 c2 = self.changelog.read(p2)
763 m1 = self.manifest.read(c1[0]).copy()
779 m1 = self.manifest.read(c1[0]).copy()
764 m2 = self.manifest.read(c2[0])
780 m2 = self.manifest.read(c2[0])
765
781
766 if use_dirstate:
782 if use_dirstate:
767 branchname = self.workingctx().branch()
783 branchname = self.workingctx().branch()
768 try:
784 try:
769 branchname = branchname.decode('UTF-8').encode('UTF-8')
785 branchname = branchname.decode('UTF-8').encode('UTF-8')
770 except UnicodeDecodeError:
786 except UnicodeDecodeError:
771 raise util.Abort(_('branch name not in UTF-8!'))
787 raise util.Abort(_('branch name not in UTF-8!'))
772 else:
788 else:
773 branchname = ""
789 branchname = ""
774
790
775 if use_dirstate:
791 if use_dirstate:
776 oldname = c1[5].get("branch") # stored in UTF-8
792 oldname = c1[5].get("branch") # stored in UTF-8
777 if (not commit and not remove and not force and p2 == nullid
793 if (not commit and not remove and not force and p2 == nullid
778 and branchname == oldname):
794 and branchname == oldname):
779 self.ui.status(_("nothing changed\n"))
795 self.ui.status(_("nothing changed\n"))
780 return None
796 return None
781
797
782 xp1 = hex(p1)
798 xp1 = hex(p1)
783 if p2 == nullid: xp2 = ''
799 if p2 == nullid: xp2 = ''
784 else: xp2 = hex(p2)
800 else: xp2 = hex(p2)
785
801
786 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
802 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
787
803
788 wlock = self.wlock()
804 wlock = self.wlock()
789 lock = self.lock()
805 lock = self.lock()
790 tr = self.transaction()
806 tr = self.transaction()
791 trp = weakref.proxy(tr)
807 trp = weakref.proxy(tr)
792
808
793 # check in files
809 # check in files
794 new = {}
810 new = {}
795 linkrev = self.changelog.count()
811 linkrev = self.changelog.count()
796 commit.sort()
812 commit.sort()
797 is_exec = util.execfunc(self.root, m1.execf)
813 is_exec = util.execfunc(self.root, m1.execf)
798 is_link = util.linkfunc(self.root, m1.linkf)
814 is_link = util.linkfunc(self.root, m1.linkf)
799 for f in commit:
815 for f in commit:
800 self.ui.note(f + "\n")
816 self.ui.note(f + "\n")
801 try:
817 try:
802 new[f] = self.filecommit(f, m1, m2, linkrev, trp, changed)
818 new[f] = self.filecommit(f, m1, m2, linkrev, trp, changed)
803 new_exec = is_exec(f)
819 new_exec = is_exec(f)
804 new_link = is_link(f)
820 new_link = is_link(f)
805 if ((not changed or changed[-1] != f) and
821 if ((not changed or changed[-1] != f) and
806 m2.get(f) != new[f]):
822 m2.get(f) != new[f]):
807 # mention the file in the changelog if some
823 # mention the file in the changelog if some
808 # flag changed, even if there was no content
824 # flag changed, even if there was no content
809 # change.
825 # change.
810 old_exec = m1.execf(f)
826 old_exec = m1.execf(f)
811 old_link = m1.linkf(f)
827 old_link = m1.linkf(f)
812 if old_exec != new_exec or old_link != new_link:
828 if old_exec != new_exec or old_link != new_link:
813 changed.append(f)
829 changed.append(f)
814 m1.set(f, new_exec, new_link)
830 m1.set(f, new_exec, new_link)
815 if use_dirstate:
831 if use_dirstate:
816 self.dirstate.normal(f)
832 self.dirstate.normal(f)
817
833
818 except (OSError, IOError):
834 except (OSError, IOError):
819 if use_dirstate:
835 if use_dirstate:
820 self.ui.warn(_("trouble committing %s!\n") % f)
836 self.ui.warn(_("trouble committing %s!\n") % f)
821 raise
837 raise
822 else:
838 else:
823 remove.append(f)
839 remove.append(f)
824
840
825 # update manifest
841 # update manifest
826 m1.update(new)
842 m1.update(new)
827 remove.sort()
843 remove.sort()
828 removed = []
844 removed = []
829
845
830 for f in remove:
846 for f in remove:
831 if f in m1:
847 if f in m1:
832 del m1[f]
848 del m1[f]
833 removed.append(f)
849 removed.append(f)
834 elif f in m2:
850 elif f in m2:
835 removed.append(f)
851 removed.append(f)
836 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
852 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
837 (new, removed))
853 (new, removed))
838
854
839 # add changeset
855 # add changeset
840 new = new.keys()
856 new = new.keys()
841 new.sort()
857 new.sort()
842
858
843 user = user or self.ui.username()
859 user = user or self.ui.username()
844 if (not empty_ok and not text) or force_editor:
860 if (not empty_ok and not text) or force_editor:
845 edittext = []
861 edittext = []
846 if text:
862 if text:
847 edittext.append(text)
863 edittext.append(text)
848 edittext.append("")
864 edittext.append("")
849 edittext.append(_("HG: Enter commit message."
865 edittext.append(_("HG: Enter commit message."
850 " Lines beginning with 'HG:' are removed."))
866 " Lines beginning with 'HG:' are removed."))
851 edittext.append("HG: --")
867 edittext.append("HG: --")
852 edittext.append("HG: user: %s" % user)
868 edittext.append("HG: user: %s" % user)
853 if p2 != nullid:
869 if p2 != nullid:
854 edittext.append("HG: branch merge")
870 edittext.append("HG: branch merge")
855 if branchname:
871 if branchname:
856 edittext.append("HG: branch '%s'" % util.tolocal(branchname))
872 edittext.append("HG: branch '%s'" % util.tolocal(branchname))
857 edittext.extend(["HG: changed %s" % f for f in changed])
873 edittext.extend(["HG: changed %s" % f for f in changed])
858 edittext.extend(["HG: removed %s" % f for f in removed])
874 edittext.extend(["HG: removed %s" % f for f in removed])
859 if not changed and not remove:
875 if not changed and not remove:
860 edittext.append("HG: no files changed")
876 edittext.append("HG: no files changed")
861 edittext.append("")
877 edittext.append("")
862 # run editor in the repository root
878 # run editor in the repository root
863 olddir = os.getcwd()
879 olddir = os.getcwd()
864 os.chdir(self.root)
880 os.chdir(self.root)
865 text = self.ui.edit("\n".join(edittext), user)
881 text = self.ui.edit("\n".join(edittext), user)
866 os.chdir(olddir)
882 os.chdir(olddir)
867
883
868 if branchname:
884 if branchname:
869 extra["branch"] = branchname
885 extra["branch"] = branchname
870
886
871 if use_dirstate:
887 if use_dirstate:
872 lines = [line.rstrip() for line in text.rstrip().splitlines()]
888 lines = [line.rstrip() for line in text.rstrip().splitlines()]
873 while lines and not lines[0]:
889 while lines and not lines[0]:
874 del lines[0]
890 del lines[0]
875 if not lines:
891 if not lines:
876 raise util.Abort(_("empty commit message"))
892 raise util.Abort(_("empty commit message"))
877 text = '\n'.join(lines)
893 text = '\n'.join(lines)
878
894
879 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
895 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
880 user, date, extra)
896 user, date, extra)
881 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
897 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
882 parent2=xp2)
898 parent2=xp2)
883 tr.close()
899 tr.close()
884
900
885 if self.branchcache and "branch" in extra:
901 if self.branchcache:
886 self.branchcache[util.tolocal(extra["branch"])] = n
902 self.branchtags()
887
903
888 if use_dirstate or update_dirstate:
904 if use_dirstate or update_dirstate:
889 self.dirstate.setparents(n)
905 self.dirstate.setparents(n)
890 if use_dirstate:
906 if use_dirstate:
891 for f in removed:
907 for f in removed:
892 self.dirstate.forget(f)
908 self.dirstate.forget(f)
893 valid = 1 # our dirstate updates are complete
909 valid = 1 # our dirstate updates are complete
894
910
895 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
911 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
896 return n
912 return n
897 finally:
913 finally:
898 if not valid: # don't save our updated dirstate
914 if not valid: # don't save our updated dirstate
899 self.dirstate.invalidate()
915 self.dirstate.invalidate()
900 del tr, lock, wlock
916 del tr, lock, wlock
901
917
902 def walk(self, node=None, files=[], match=util.always, badmatch=None):
918 def walk(self, node=None, files=[], match=util.always, badmatch=None):
903 '''
919 '''
904 walk recursively through the directory tree or a given
920 walk recursively through the directory tree or a given
905 changeset, finding all files matched by the match
921 changeset, finding all files matched by the match
906 function
922 function
907
923
908 results are yielded in a tuple (src, filename), where src
924 results are yielded in a tuple (src, filename), where src
909 is one of:
925 is one of:
910 'f' the file was found in the directory tree
926 'f' the file was found in the directory tree
911 'm' the file was only in the dirstate and not in the tree
927 'm' the file was only in the dirstate and not in the tree
912 'b' file was not found and matched badmatch
928 'b' file was not found and matched badmatch
913 '''
929 '''
914
930
915 if node:
931 if node:
916 fdict = dict.fromkeys(files)
932 fdict = dict.fromkeys(files)
917 # for dirstate.walk, files=['.'] means "walk the whole tree".
933 # for dirstate.walk, files=['.'] means "walk the whole tree".
918 # follow that here, too
934 # follow that here, too
919 fdict.pop('.', None)
935 fdict.pop('.', None)
920 mdict = self.manifest.read(self.changelog.read(node)[0])
936 mdict = self.manifest.read(self.changelog.read(node)[0])
921 mfiles = mdict.keys()
937 mfiles = mdict.keys()
922 mfiles.sort()
938 mfiles.sort()
923 for fn in mfiles:
939 for fn in mfiles:
924 for ffn in fdict:
940 for ffn in fdict:
925 # match if the file is the exact name or a directory
941 # match if the file is the exact name or a directory
926 if ffn == fn or fn.startswith("%s/" % ffn):
942 if ffn == fn or fn.startswith("%s/" % ffn):
927 del fdict[ffn]
943 del fdict[ffn]
928 break
944 break
929 if match(fn):
945 if match(fn):
930 yield 'm', fn
946 yield 'm', fn
931 ffiles = fdict.keys()
947 ffiles = fdict.keys()
932 ffiles.sort()
948 ffiles.sort()
933 for fn in ffiles:
949 for fn in ffiles:
934 if badmatch and badmatch(fn):
950 if badmatch and badmatch(fn):
935 if match(fn):
951 if match(fn):
936 yield 'b', fn
952 yield 'b', fn
937 else:
953 else:
938 self.ui.warn(_('%s: No such file in rev %s\n')
954 self.ui.warn(_('%s: No such file in rev %s\n')
939 % (self.pathto(fn), short(node)))
955 % (self.pathto(fn), short(node)))
940 else:
956 else:
941 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
957 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
942 yield src, fn
958 yield src, fn
943
959
944 def status(self, node1=None, node2=None, files=[], match=util.always,
960 def status(self, node1=None, node2=None, files=[], match=util.always,
945 list_ignored=False, list_clean=False):
961 list_ignored=False, list_clean=False):
946 """return status of files between two nodes or node and working directory
962 """return status of files between two nodes or node and working directory
947
963
948 If node1 is None, use the first dirstate parent instead.
964 If node1 is None, use the first dirstate parent instead.
949 If node2 is None, compare node1 with working directory.
965 If node2 is None, compare node1 with working directory.
950 """
966 """
951
967
952 def fcmp(fn, getnode):
968 def fcmp(fn, getnode):
953 t1 = self.wread(fn)
969 t1 = self.wread(fn)
954 return self.file(fn).cmp(getnode(fn), t1)
970 return self.file(fn).cmp(getnode(fn), t1)
955
971
956 def mfmatches(node):
972 def mfmatches(node):
957 change = self.changelog.read(node)
973 change = self.changelog.read(node)
958 mf = self.manifest.read(change[0]).copy()
974 mf = self.manifest.read(change[0]).copy()
959 for fn in mf.keys():
975 for fn in mf.keys():
960 if not match(fn):
976 if not match(fn):
961 del mf[fn]
977 del mf[fn]
962 return mf
978 return mf
963
979
964 modified, added, removed, deleted, unknown = [], [], [], [], []
980 modified, added, removed, deleted, unknown = [], [], [], [], []
965 ignored, clean = [], []
981 ignored, clean = [], []
966
982
967 compareworking = False
983 compareworking = False
968 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
984 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
969 compareworking = True
985 compareworking = True
970
986
971 if not compareworking:
987 if not compareworking:
972 # read the manifest from node1 before the manifest from node2,
988 # read the manifest from node1 before the manifest from node2,
973 # so that we'll hit the manifest cache if we're going through
989 # so that we'll hit the manifest cache if we're going through
974 # all the revisions in parent->child order.
990 # all the revisions in parent->child order.
975 mf1 = mfmatches(node1)
991 mf1 = mfmatches(node1)
976
992
977 # are we comparing the working directory?
993 # are we comparing the working directory?
978 if not node2:
994 if not node2:
979 (lookup, modified, added, removed, deleted, unknown,
995 (lookup, modified, added, removed, deleted, unknown,
980 ignored, clean) = self.dirstate.status(files, match,
996 ignored, clean) = self.dirstate.status(files, match,
981 list_ignored, list_clean)
997 list_ignored, list_clean)
982
998
983 # are we comparing working dir against its parent?
999 # are we comparing working dir against its parent?
984 if compareworking:
1000 if compareworking:
985 if lookup:
1001 if lookup:
986 fixup = []
1002 fixup = []
987 # do a full compare of any files that might have changed
1003 # do a full compare of any files that might have changed
988 ctx = self.changectx()
1004 ctx = self.changectx()
989 for f in lookup:
1005 for f in lookup:
990 if f not in ctx or ctx[f].cmp(self.wread(f)):
1006 if f not in ctx or ctx[f].cmp(self.wread(f)):
991 modified.append(f)
1007 modified.append(f)
992 else:
1008 else:
993 fixup.append(f)
1009 fixup.append(f)
994 if list_clean:
1010 if list_clean:
995 clean.append(f)
1011 clean.append(f)
996
1012
997 # update dirstate for files that are actually clean
1013 # update dirstate for files that are actually clean
998 if fixup:
1014 if fixup:
999 wlock = None
1015 wlock = None
1000 try:
1016 try:
1001 try:
1017 try:
1002 wlock = self.wlock(False)
1018 wlock = self.wlock(False)
1003 except lock.LockException:
1019 except lock.LockException:
1004 pass
1020 pass
1005 if wlock:
1021 if wlock:
1006 for f in fixup:
1022 for f in fixup:
1007 self.dirstate.normal(f)
1023 self.dirstate.normal(f)
1008 finally:
1024 finally:
1009 del wlock
1025 del wlock
1010 else:
1026 else:
1011 # we are comparing working dir against non-parent
1027 # we are comparing working dir against non-parent
1012 # generate a pseudo-manifest for the working dir
1028 # generate a pseudo-manifest for the working dir
1013 # XXX: create it in dirstate.py ?
1029 # XXX: create it in dirstate.py ?
1014 mf2 = mfmatches(self.dirstate.parents()[0])
1030 mf2 = mfmatches(self.dirstate.parents()[0])
1015 is_exec = util.execfunc(self.root, mf2.execf)
1031 is_exec = util.execfunc(self.root, mf2.execf)
1016 is_link = util.linkfunc(self.root, mf2.linkf)
1032 is_link = util.linkfunc(self.root, mf2.linkf)
1017 for f in lookup + modified + added:
1033 for f in lookup + modified + added:
1018 mf2[f] = ""
1034 mf2[f] = ""
1019 mf2.set(f, is_exec(f), is_link(f))
1035 mf2.set(f, is_exec(f), is_link(f))
1020 for f in removed:
1036 for f in removed:
1021 if f in mf2:
1037 if f in mf2:
1022 del mf2[f]
1038 del mf2[f]
1023
1039
1024 else:
1040 else:
1025 # we are comparing two revisions
1041 # we are comparing two revisions
1026 mf2 = mfmatches(node2)
1042 mf2 = mfmatches(node2)
1027
1043
1028 if not compareworking:
1044 if not compareworking:
1029 # flush lists from dirstate before comparing manifests
1045 # flush lists from dirstate before comparing manifests
1030 modified, added, clean = [], [], []
1046 modified, added, clean = [], [], []
1031
1047
1032 # make sure to sort the files so we talk to the disk in a
1048 # make sure to sort the files so we talk to the disk in a
1033 # reasonable order
1049 # reasonable order
1034 mf2keys = mf2.keys()
1050 mf2keys = mf2.keys()
1035 mf2keys.sort()
1051 mf2keys.sort()
1036 getnode = lambda fn: mf1.get(fn, nullid)
1052 getnode = lambda fn: mf1.get(fn, nullid)
1037 for fn in mf2keys:
1053 for fn in mf2keys:
1038 if fn in mf1:
1054 if fn in mf1:
1039 if (mf1.flags(fn) != mf2.flags(fn) or
1055 if (mf1.flags(fn) != mf2.flags(fn) or
1040 (mf1[fn] != mf2[fn] and
1056 (mf1[fn] != mf2[fn] and
1041 (mf2[fn] != "" or fcmp(fn, getnode)))):
1057 (mf2[fn] != "" or fcmp(fn, getnode)))):
1042 modified.append(fn)
1058 modified.append(fn)
1043 elif list_clean:
1059 elif list_clean:
1044 clean.append(fn)
1060 clean.append(fn)
1045 del mf1[fn]
1061 del mf1[fn]
1046 else:
1062 else:
1047 added.append(fn)
1063 added.append(fn)
1048
1064
1049 removed = mf1.keys()
1065 removed = mf1.keys()
1050
1066
1051 # sort and return results:
1067 # sort and return results:
1052 for l in modified, added, removed, deleted, unknown, ignored, clean:
1068 for l in modified, added, removed, deleted, unknown, ignored, clean:
1053 l.sort()
1069 l.sort()
1054 return (modified, added, removed, deleted, unknown, ignored, clean)
1070 return (modified, added, removed, deleted, unknown, ignored, clean)
1055
1071
1056 def add(self, list):
1072 def add(self, list):
1057 wlock = self.wlock()
1073 wlock = self.wlock()
1058 try:
1074 try:
1059 rejected = []
1075 rejected = []
1060 for f in list:
1076 for f in list:
1061 p = self.wjoin(f)
1077 p = self.wjoin(f)
1062 try:
1078 try:
1063 st = os.lstat(p)
1079 st = os.lstat(p)
1064 except:
1080 except:
1065 self.ui.warn(_("%s does not exist!\n") % f)
1081 self.ui.warn(_("%s does not exist!\n") % f)
1066 rejected.append(f)
1082 rejected.append(f)
1067 continue
1083 continue
1068 if st.st_size > 10000000:
1084 if st.st_size > 10000000:
1069 self.ui.warn(_("%s: files over 10MB may cause memory and"
1085 self.ui.warn(_("%s: files over 10MB may cause memory and"
1070 " performance problems\n"
1086 " performance problems\n"
1071 "(use 'hg revert %s' to unadd the file)\n")
1087 "(use 'hg revert %s' to unadd the file)\n")
1072 % (f, f))
1088 % (f, f))
1073 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1089 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1074 self.ui.warn(_("%s not added: only files and symlinks "
1090 self.ui.warn(_("%s not added: only files and symlinks "
1075 "supported currently\n") % f)
1091 "supported currently\n") % f)
1076 rejected.append(p)
1092 rejected.append(p)
1077 elif self.dirstate[f] in 'amn':
1093 elif self.dirstate[f] in 'amn':
1078 self.ui.warn(_("%s already tracked!\n") % f)
1094 self.ui.warn(_("%s already tracked!\n") % f)
1079 elif self.dirstate[f] == 'r':
1095 elif self.dirstate[f] == 'r':
1080 self.dirstate.normallookup(f)
1096 self.dirstate.normallookup(f)
1081 else:
1097 else:
1082 self.dirstate.add(f)
1098 self.dirstate.add(f)
1083 return rejected
1099 return rejected
1084 finally:
1100 finally:
1085 del wlock
1101 del wlock
1086
1102
1087 def forget(self, list):
1103 def forget(self, list):
1088 wlock = self.wlock()
1104 wlock = self.wlock()
1089 try:
1105 try:
1090 for f in list:
1106 for f in list:
1091 if self.dirstate[f] != 'a':
1107 if self.dirstate[f] != 'a':
1092 self.ui.warn(_("%s not added!\n") % f)
1108 self.ui.warn(_("%s not added!\n") % f)
1093 else:
1109 else:
1094 self.dirstate.forget(f)
1110 self.dirstate.forget(f)
1095 finally:
1111 finally:
1096 del wlock
1112 del wlock
1097
1113
1098 def remove(self, list, unlink=False):
1114 def remove(self, list, unlink=False):
1099 wlock = None
1115 wlock = None
1100 try:
1116 try:
1101 if unlink:
1117 if unlink:
1102 for f in list:
1118 for f in list:
1103 try:
1119 try:
1104 util.unlink(self.wjoin(f))
1120 util.unlink(self.wjoin(f))
1105 except OSError, inst:
1121 except OSError, inst:
1106 if inst.errno != errno.ENOENT:
1122 if inst.errno != errno.ENOENT:
1107 raise
1123 raise
1108 wlock = self.wlock()
1124 wlock = self.wlock()
1109 for f in list:
1125 for f in list:
1110 if unlink and os.path.exists(self.wjoin(f)):
1126 if unlink and os.path.exists(self.wjoin(f)):
1111 self.ui.warn(_("%s still exists!\n") % f)
1127 self.ui.warn(_("%s still exists!\n") % f)
1112 elif self.dirstate[f] == 'a':
1128 elif self.dirstate[f] == 'a':
1113 self.dirstate.forget(f)
1129 self.dirstate.forget(f)
1114 elif f not in self.dirstate:
1130 elif f not in self.dirstate:
1115 self.ui.warn(_("%s not tracked!\n") % f)
1131 self.ui.warn(_("%s not tracked!\n") % f)
1116 else:
1132 else:
1117 self.dirstate.remove(f)
1133 self.dirstate.remove(f)
1118 finally:
1134 finally:
1119 del wlock
1135 del wlock
1120
1136
1121 def undelete(self, list):
1137 def undelete(self, list):
1122 wlock = None
1138 wlock = None
1123 try:
1139 try:
1124 manifests = [self.manifest.read(self.changelog.read(p)[0])
1140 manifests = [self.manifest.read(self.changelog.read(p)[0])
1125 for p in self.dirstate.parents() if p != nullid]
1141 for p in self.dirstate.parents() if p != nullid]
1126 wlock = self.wlock()
1142 wlock = self.wlock()
1127 for f in list:
1143 for f in list:
1128 if self.dirstate[f] != 'r':
1144 if self.dirstate[f] != 'r':
1129 self.ui.warn("%s not removed!\n" % f)
1145 self.ui.warn("%s not removed!\n" % f)
1130 else:
1146 else:
1131 m = f in manifests[0] and manifests[0] or manifests[1]
1147 m = f in manifests[0] and manifests[0] or manifests[1]
1132 t = self.file(f).read(m[f])
1148 t = self.file(f).read(m[f])
1133 self.wwrite(f, t, m.flags(f))
1149 self.wwrite(f, t, m.flags(f))
1134 self.dirstate.normal(f)
1150 self.dirstate.normal(f)
1135 finally:
1151 finally:
1136 del wlock
1152 del wlock
1137
1153
1138 def copy(self, source, dest):
1154 def copy(self, source, dest):
1139 wlock = None
1155 wlock = None
1140 try:
1156 try:
1141 p = self.wjoin(dest)
1157 p = self.wjoin(dest)
1142 if not (os.path.exists(p) or os.path.islink(p)):
1158 if not (os.path.exists(p) or os.path.islink(p)):
1143 self.ui.warn(_("%s does not exist!\n") % dest)
1159 self.ui.warn(_("%s does not exist!\n") % dest)
1144 elif not (os.path.isfile(p) or os.path.islink(p)):
1160 elif not (os.path.isfile(p) or os.path.islink(p)):
1145 self.ui.warn(_("copy failed: %s is not a file or a "
1161 self.ui.warn(_("copy failed: %s is not a file or a "
1146 "symbolic link\n") % dest)
1162 "symbolic link\n") % dest)
1147 else:
1163 else:
1148 wlock = self.wlock()
1164 wlock = self.wlock()
1149 if dest not in self.dirstate:
1165 if dest not in self.dirstate:
1150 self.dirstate.add(dest)
1166 self.dirstate.add(dest)
1151 self.dirstate.copy(source, dest)
1167 self.dirstate.copy(source, dest)
1152 finally:
1168 finally:
1153 del wlock
1169 del wlock
1154
1170
1155 def heads(self, start=None):
1171 def heads(self, start=None):
1156 heads = self.changelog.heads(start)
1172 heads = self.changelog.heads(start)
1157 # sort the output in rev descending order
1173 # sort the output in rev descending order
1158 heads = [(-self.changelog.rev(h), h) for h in heads]
1174 heads = [(-self.changelog.rev(h), h) for h in heads]
1159 heads.sort()
1175 heads.sort()
1160 return [n for (r, n) in heads]
1176 return [n for (r, n) in heads]
1161
1177
1162 def branchheads(self, branch, start=None):
1178 def branchheads(self, branch, start=None):
1163 branches = self.branchtags()
1179 branches = self.branchtags()
1164 if branch not in branches:
1180 if branch not in branches:
1165 return []
1181 return []
1166 # The basic algorithm is this:
1182 # The basic algorithm is this:
1167 #
1183 #
1168 # Start from the branch tip since there are no later revisions that can
1184 # Start from the branch tip since there are no later revisions that can
1169 # possibly be in this branch, and the tip is a guaranteed head.
1185 # possibly be in this branch, and the tip is a guaranteed head.
1170 #
1186 #
1171 # Remember the tip's parents as the first ancestors, since these by
1187 # Remember the tip's parents as the first ancestors, since these by
1172 # definition are not heads.
1188 # definition are not heads.
1173 #
1189 #
1174 # Step backwards from the brach tip through all the revisions. We are
1190 # Step backwards from the brach tip through all the revisions. We are
1175 # guaranteed by the rules of Mercurial that we will now be visiting the
1191 # guaranteed by the rules of Mercurial that we will now be visiting the
1176 # nodes in reverse topological order (children before parents).
1192 # nodes in reverse topological order (children before parents).
1177 #
1193 #
1178 # If a revision is one of the ancestors of a head then we can toss it
1194 # If a revision is one of the ancestors of a head then we can toss it
1179 # out of the ancestors set (we've already found it and won't be
1195 # out of the ancestors set (we've already found it and won't be
1180 # visiting it again) and put its parents in the ancestors set.
1196 # visiting it again) and put its parents in the ancestors set.
1181 #
1197 #
1182 # Otherwise, if a revision is in the branch it's another head, since it
1198 # Otherwise, if a revision is in the branch it's another head, since it
1183 # wasn't in the ancestor list of an existing head. So add it to the
1199 # wasn't in the ancestor list of an existing head. So add it to the
1184 # head list, and add its parents to the ancestor list.
1200 # head list, and add its parents to the ancestor list.
1185 #
1201 #
1186 # If it is not in the branch ignore it.
1202 # If it is not in the branch ignore it.
1187 #
1203 #
1188 # Once we have a list of heads, use nodesbetween to filter out all the
1204 # Once we have a list of heads, use nodesbetween to filter out all the
1189 # heads that cannot be reached from startrev. There may be a more
1205 # heads that cannot be reached from startrev. There may be a more
1190 # efficient way to do this as part of the previous algorithm.
1206 # efficient way to do this as part of the previous algorithm.
1191
1207
1192 set = util.set
1208 set = util.set
1193 heads = [self.changelog.rev(branches[branch])]
1209 heads = [self.changelog.rev(branches[branch])]
1194 # Don't care if ancestors contains nullrev or not.
1210 # Don't care if ancestors contains nullrev or not.
1195 ancestors = set(self.changelog.parentrevs(heads[0]))
1211 ancestors = set(self.changelog.parentrevs(heads[0]))
1196 for rev in xrange(heads[0] - 1, nullrev, -1):
1212 for rev in xrange(heads[0] - 1, nullrev, -1):
1197 if rev in ancestors:
1213 if rev in ancestors:
1198 ancestors.update(self.changelog.parentrevs(rev))
1214 ancestors.update(self.changelog.parentrevs(rev))
1199 ancestors.remove(rev)
1215 ancestors.remove(rev)
1200 elif self.changectx(rev).branch() == branch:
1216 elif self.changectx(rev).branch() == branch:
1201 heads.append(rev)
1217 heads.append(rev)
1202 ancestors.update(self.changelog.parentrevs(rev))
1218 ancestors.update(self.changelog.parentrevs(rev))
1203 heads = [self.changelog.node(rev) for rev in heads]
1219 heads = [self.changelog.node(rev) for rev in heads]
1204 if start is not None:
1220 if start is not None:
1205 heads = self.changelog.nodesbetween([start], heads)[2]
1221 heads = self.changelog.nodesbetween([start], heads)[2]
1206 return heads
1222 return heads
1207
1223
1208 def branches(self, nodes):
1224 def branches(self, nodes):
1209 if not nodes:
1225 if not nodes:
1210 nodes = [self.changelog.tip()]
1226 nodes = [self.changelog.tip()]
1211 b = []
1227 b = []
1212 for n in nodes:
1228 for n in nodes:
1213 t = n
1229 t = n
1214 while 1:
1230 while 1:
1215 p = self.changelog.parents(n)
1231 p = self.changelog.parents(n)
1216 if p[1] != nullid or p[0] == nullid:
1232 if p[1] != nullid or p[0] == nullid:
1217 b.append((t, n, p[0], p[1]))
1233 b.append((t, n, p[0], p[1]))
1218 break
1234 break
1219 n = p[0]
1235 n = p[0]
1220 return b
1236 return b
1221
1237
1222 def between(self, pairs):
1238 def between(self, pairs):
1223 r = []
1239 r = []
1224
1240
1225 for top, bottom in pairs:
1241 for top, bottom in pairs:
1226 n, l, i = top, [], 0
1242 n, l, i = top, [], 0
1227 f = 1
1243 f = 1
1228
1244
1229 while n != bottom:
1245 while n != bottom:
1230 p = self.changelog.parents(n)[0]
1246 p = self.changelog.parents(n)[0]
1231 if i == f:
1247 if i == f:
1232 l.append(n)
1248 l.append(n)
1233 f = f * 2
1249 f = f * 2
1234 n = p
1250 n = p
1235 i += 1
1251 i += 1
1236
1252
1237 r.append(l)
1253 r.append(l)
1238
1254
1239 return r
1255 return r
1240
1256
1241 def findincoming(self, remote, base=None, heads=None, force=False):
1257 def findincoming(self, remote, base=None, heads=None, force=False):
1242 """Return list of roots of the subsets of missing nodes from remote
1258 """Return list of roots of the subsets of missing nodes from remote
1243
1259
1244 If base dict is specified, assume that these nodes and their parents
1260 If base dict is specified, assume that these nodes and their parents
1245 exist on the remote side and that no child of a node of base exists
1261 exist on the remote side and that no child of a node of base exists
1246 in both remote and self.
1262 in both remote and self.
1247 Furthermore base will be updated to include the nodes that exists
1263 Furthermore base will be updated to include the nodes that exists
1248 in self and remote but no children exists in self and remote.
1264 in self and remote but no children exists in self and remote.
1249 If a list of heads is specified, return only nodes which are heads
1265 If a list of heads is specified, return only nodes which are heads
1250 or ancestors of these heads.
1266 or ancestors of these heads.
1251
1267
1252 All the ancestors of base are in self and in remote.
1268 All the ancestors of base are in self and in remote.
1253 All the descendants of the list returned are missing in self.
1269 All the descendants of the list returned are missing in self.
1254 (and so we know that the rest of the nodes are missing in remote, see
1270 (and so we know that the rest of the nodes are missing in remote, see
1255 outgoing)
1271 outgoing)
1256 """
1272 """
1257 m = self.changelog.nodemap
1273 m = self.changelog.nodemap
1258 search = []
1274 search = []
1259 fetch = {}
1275 fetch = {}
1260 seen = {}
1276 seen = {}
1261 seenbranch = {}
1277 seenbranch = {}
1262 if base == None:
1278 if base == None:
1263 base = {}
1279 base = {}
1264
1280
1265 if not heads:
1281 if not heads:
1266 heads = remote.heads()
1282 heads = remote.heads()
1267
1283
1268 if self.changelog.tip() == nullid:
1284 if self.changelog.tip() == nullid:
1269 base[nullid] = 1
1285 base[nullid] = 1
1270 if heads != [nullid]:
1286 if heads != [nullid]:
1271 return [nullid]
1287 return [nullid]
1272 return []
1288 return []
1273
1289
1274 # assume we're closer to the tip than the root
1290 # assume we're closer to the tip than the root
1275 # and start by examining the heads
1291 # and start by examining the heads
1276 self.ui.status(_("searching for changes\n"))
1292 self.ui.status(_("searching for changes\n"))
1277
1293
1278 unknown = []
1294 unknown = []
1279 for h in heads:
1295 for h in heads:
1280 if h not in m:
1296 if h not in m:
1281 unknown.append(h)
1297 unknown.append(h)
1282 else:
1298 else:
1283 base[h] = 1
1299 base[h] = 1
1284
1300
1285 if not unknown:
1301 if not unknown:
1286 return []
1302 return []
1287
1303
1288 req = dict.fromkeys(unknown)
1304 req = dict.fromkeys(unknown)
1289 reqcnt = 0
1305 reqcnt = 0
1290
1306
1291 # search through remote branches
1307 # search through remote branches
1292 # a 'branch' here is a linear segment of history, with four parts:
1308 # a 'branch' here is a linear segment of history, with four parts:
1293 # head, root, first parent, second parent
1309 # head, root, first parent, second parent
1294 # (a branch always has two parents (or none) by definition)
1310 # (a branch always has two parents (or none) by definition)
1295 unknown = remote.branches(unknown)
1311 unknown = remote.branches(unknown)
1296 while unknown:
1312 while unknown:
1297 r = []
1313 r = []
1298 while unknown:
1314 while unknown:
1299 n = unknown.pop(0)
1315 n = unknown.pop(0)
1300 if n[0] in seen:
1316 if n[0] in seen:
1301 continue
1317 continue
1302
1318
1303 self.ui.debug(_("examining %s:%s\n")
1319 self.ui.debug(_("examining %s:%s\n")
1304 % (short(n[0]), short(n[1])))
1320 % (short(n[0]), short(n[1])))
1305 if n[0] == nullid: # found the end of the branch
1321 if n[0] == nullid: # found the end of the branch
1306 pass
1322 pass
1307 elif n in seenbranch:
1323 elif n in seenbranch:
1308 self.ui.debug(_("branch already found\n"))
1324 self.ui.debug(_("branch already found\n"))
1309 continue
1325 continue
1310 elif n[1] and n[1] in m: # do we know the base?
1326 elif n[1] and n[1] in m: # do we know the base?
1311 self.ui.debug(_("found incomplete branch %s:%s\n")
1327 self.ui.debug(_("found incomplete branch %s:%s\n")
1312 % (short(n[0]), short(n[1])))
1328 % (short(n[0]), short(n[1])))
1313 search.append(n) # schedule branch range for scanning
1329 search.append(n) # schedule branch range for scanning
1314 seenbranch[n] = 1
1330 seenbranch[n] = 1
1315 else:
1331 else:
1316 if n[1] not in seen and n[1] not in fetch:
1332 if n[1] not in seen and n[1] not in fetch:
1317 if n[2] in m and n[3] in m:
1333 if n[2] in m and n[3] in m:
1318 self.ui.debug(_("found new changeset %s\n") %
1334 self.ui.debug(_("found new changeset %s\n") %
1319 short(n[1]))
1335 short(n[1]))
1320 fetch[n[1]] = 1 # earliest unknown
1336 fetch[n[1]] = 1 # earliest unknown
1321 for p in n[2:4]:
1337 for p in n[2:4]:
1322 if p in m:
1338 if p in m:
1323 base[p] = 1 # latest known
1339 base[p] = 1 # latest known
1324
1340
1325 for p in n[2:4]:
1341 for p in n[2:4]:
1326 if p not in req and p not in m:
1342 if p not in req and p not in m:
1327 r.append(p)
1343 r.append(p)
1328 req[p] = 1
1344 req[p] = 1
1329 seen[n[0]] = 1
1345 seen[n[0]] = 1
1330
1346
1331 if r:
1347 if r:
1332 reqcnt += 1
1348 reqcnt += 1
1333 self.ui.debug(_("request %d: %s\n") %
1349 self.ui.debug(_("request %d: %s\n") %
1334 (reqcnt, " ".join(map(short, r))))
1350 (reqcnt, " ".join(map(short, r))))
1335 for p in xrange(0, len(r), 10):
1351 for p in xrange(0, len(r), 10):
1336 for b in remote.branches(r[p:p+10]):
1352 for b in remote.branches(r[p:p+10]):
1337 self.ui.debug(_("received %s:%s\n") %
1353 self.ui.debug(_("received %s:%s\n") %
1338 (short(b[0]), short(b[1])))
1354 (short(b[0]), short(b[1])))
1339 unknown.append(b)
1355 unknown.append(b)
1340
1356
1341 # do binary search on the branches we found
1357 # do binary search on the branches we found
1342 while search:
1358 while search:
1343 n = search.pop(0)
1359 n = search.pop(0)
1344 reqcnt += 1
1360 reqcnt += 1
1345 l = remote.between([(n[0], n[1])])[0]
1361 l = remote.between([(n[0], n[1])])[0]
1346 l.append(n[1])
1362 l.append(n[1])
1347 p = n[0]
1363 p = n[0]
1348 f = 1
1364 f = 1
1349 for i in l:
1365 for i in l:
1350 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1366 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1351 if i in m:
1367 if i in m:
1352 if f <= 2:
1368 if f <= 2:
1353 self.ui.debug(_("found new branch changeset %s\n") %
1369 self.ui.debug(_("found new branch changeset %s\n") %
1354 short(p))
1370 short(p))
1355 fetch[p] = 1
1371 fetch[p] = 1
1356 base[i] = 1
1372 base[i] = 1
1357 else:
1373 else:
1358 self.ui.debug(_("narrowed branch search to %s:%s\n")
1374 self.ui.debug(_("narrowed branch search to %s:%s\n")
1359 % (short(p), short(i)))
1375 % (short(p), short(i)))
1360 search.append((p, i))
1376 search.append((p, i))
1361 break
1377 break
1362 p, f = i, f * 2
1378 p, f = i, f * 2
1363
1379
1364 # sanity check our fetch list
1380 # sanity check our fetch list
1365 for f in fetch.keys():
1381 for f in fetch.keys():
1366 if f in m:
1382 if f in m:
1367 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1383 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1368
1384
1369 if base.keys() == [nullid]:
1385 if base.keys() == [nullid]:
1370 if force:
1386 if force:
1371 self.ui.warn(_("warning: repository is unrelated\n"))
1387 self.ui.warn(_("warning: repository is unrelated\n"))
1372 else:
1388 else:
1373 raise util.Abort(_("repository is unrelated"))
1389 raise util.Abort(_("repository is unrelated"))
1374
1390
1375 self.ui.debug(_("found new changesets starting at ") +
1391 self.ui.debug(_("found new changesets starting at ") +
1376 " ".join([short(f) for f in fetch]) + "\n")
1392 " ".join([short(f) for f in fetch]) + "\n")
1377
1393
1378 self.ui.debug(_("%d total queries\n") % reqcnt)
1394 self.ui.debug(_("%d total queries\n") % reqcnt)
1379
1395
1380 return fetch.keys()
1396 return fetch.keys()
1381
1397
1382 def findoutgoing(self, remote, base=None, heads=None, force=False):
1398 def findoutgoing(self, remote, base=None, heads=None, force=False):
1383 """Return list of nodes that are roots of subsets not in remote
1399 """Return list of nodes that are roots of subsets not in remote
1384
1400
1385 If base dict is specified, assume that these nodes and their parents
1401 If base dict is specified, assume that these nodes and their parents
1386 exist on the remote side.
1402 exist on the remote side.
1387 If a list of heads is specified, return only nodes which are heads
1403 If a list of heads is specified, return only nodes which are heads
1388 or ancestors of these heads, and return a second element which
1404 or ancestors of these heads, and return a second element which
1389 contains all remote heads which get new children.
1405 contains all remote heads which get new children.
1390 """
1406 """
1391 if base == None:
1407 if base == None:
1392 base = {}
1408 base = {}
1393 self.findincoming(remote, base, heads, force=force)
1409 self.findincoming(remote, base, heads, force=force)
1394
1410
1395 self.ui.debug(_("common changesets up to ")
1411 self.ui.debug(_("common changesets up to ")
1396 + " ".join(map(short, base.keys())) + "\n")
1412 + " ".join(map(short, base.keys())) + "\n")
1397
1413
1398 remain = dict.fromkeys(self.changelog.nodemap)
1414 remain = dict.fromkeys(self.changelog.nodemap)
1399
1415
1400 # prune everything remote has from the tree
1416 # prune everything remote has from the tree
1401 del remain[nullid]
1417 del remain[nullid]
1402 remove = base.keys()
1418 remove = base.keys()
1403 while remove:
1419 while remove:
1404 n = remove.pop(0)
1420 n = remove.pop(0)
1405 if n in remain:
1421 if n in remain:
1406 del remain[n]
1422 del remain[n]
1407 for p in self.changelog.parents(n):
1423 for p in self.changelog.parents(n):
1408 remove.append(p)
1424 remove.append(p)
1409
1425
1410 # find every node whose parents have been pruned
1426 # find every node whose parents have been pruned
1411 subset = []
1427 subset = []
1412 # find every remote head that will get new children
1428 # find every remote head that will get new children
1413 updated_heads = {}
1429 updated_heads = {}
1414 for n in remain:
1430 for n in remain:
1415 p1, p2 = self.changelog.parents(n)
1431 p1, p2 = self.changelog.parents(n)
1416 if p1 not in remain and p2 not in remain:
1432 if p1 not in remain and p2 not in remain:
1417 subset.append(n)
1433 subset.append(n)
1418 if heads:
1434 if heads:
1419 if p1 in heads:
1435 if p1 in heads:
1420 updated_heads[p1] = True
1436 updated_heads[p1] = True
1421 if p2 in heads:
1437 if p2 in heads:
1422 updated_heads[p2] = True
1438 updated_heads[p2] = True
1423
1439
1424 # this is the set of all roots we have to push
1440 # this is the set of all roots we have to push
1425 if heads:
1441 if heads:
1426 return subset, updated_heads.keys()
1442 return subset, updated_heads.keys()
1427 else:
1443 else:
1428 return subset
1444 return subset
1429
1445
1430 def pull(self, remote, heads=None, force=False):
1446 def pull(self, remote, heads=None, force=False):
1431 lock = self.lock()
1447 lock = self.lock()
1432 try:
1448 try:
1433 fetch = self.findincoming(remote, heads=heads, force=force)
1449 fetch = self.findincoming(remote, heads=heads, force=force)
1434 if fetch == [nullid]:
1450 if fetch == [nullid]:
1435 self.ui.status(_("requesting all changes\n"))
1451 self.ui.status(_("requesting all changes\n"))
1436
1452
1437 if not fetch:
1453 if not fetch:
1438 self.ui.status(_("no changes found\n"))
1454 self.ui.status(_("no changes found\n"))
1439 return 0
1455 return 0
1440
1456
1441 if heads is None:
1457 if heads is None:
1442 cg = remote.changegroup(fetch, 'pull')
1458 cg = remote.changegroup(fetch, 'pull')
1443 else:
1459 else:
1444 if 'changegroupsubset' not in remote.capabilities:
1460 if 'changegroupsubset' not in remote.capabilities:
1445 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1461 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1446 cg = remote.changegroupsubset(fetch, heads, 'pull')
1462 cg = remote.changegroupsubset(fetch, heads, 'pull')
1447 return self.addchangegroup(cg, 'pull', remote.url())
1463 return self.addchangegroup(cg, 'pull', remote.url())
1448 finally:
1464 finally:
1449 del lock
1465 del lock
1450
1466
1451 def push(self, remote, force=False, revs=None):
1467 def push(self, remote, force=False, revs=None):
1452 # there are two ways to push to remote repo:
1468 # there are two ways to push to remote repo:
1453 #
1469 #
1454 # addchangegroup assumes local user can lock remote
1470 # addchangegroup assumes local user can lock remote
1455 # repo (local filesystem, old ssh servers).
1471 # repo (local filesystem, old ssh servers).
1456 #
1472 #
1457 # unbundle assumes local user cannot lock remote repo (new ssh
1473 # unbundle assumes local user cannot lock remote repo (new ssh
1458 # servers, http servers).
1474 # servers, http servers).
1459
1475
1460 if remote.capable('unbundle'):
1476 if remote.capable('unbundle'):
1461 return self.push_unbundle(remote, force, revs)
1477 return self.push_unbundle(remote, force, revs)
1462 return self.push_addchangegroup(remote, force, revs)
1478 return self.push_addchangegroup(remote, force, revs)
1463
1479
    def prepush(self, remote, force, revs):
        """Analyze what a push to remote would do and build the changegroup.

        Returns a 2-tuple:
          (changegroup, remote_heads) when there is something to push, or
          (None, 1) when there is nothing to push, or when the push would
          create new remote heads and force is not set.
        remote_heads is the head list the remote reported before the push;
        callers hand it back to the server so races can be detected.
        """
        base = {}
        remote_heads = remote.heads()
        # inc is true when the remote has changesets we lack; only used
        # for the "unsynced remote changes" note below
        inc = self.findincoming(remote, base, remote_heads, force=force)

        # update: roots of the subsets missing from remote
        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            # restrict the outgoing set to ancestors of the requested revs
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                # remote repository is empty: cannot create extra heads
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                # pushing everything and we already have more heads than
                # the remote does
                warn = 1
            else:
                # simulate the remote head list after the push: every
                # remote head with no outgoing head descended from it
                # survives alongside our outgoing heads
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            # no outgoing head descends from r, so r
                            # remains a head after the push
                            newheads.append(r)
                    else:
                        # remote head unknown locally; assume it survives
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1
            elif inc:
                self.ui.warn(_("note: unsynced remote changes!\n"))


        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
1519
1535
1520 def push_addchangegroup(self, remote, force, revs):
1536 def push_addchangegroup(self, remote, force, revs):
1521 lock = remote.lock()
1537 lock = remote.lock()
1522 try:
1538 try:
1523 ret = self.prepush(remote, force, revs)
1539 ret = self.prepush(remote, force, revs)
1524 if ret[0] is not None:
1540 if ret[0] is not None:
1525 cg, remote_heads = ret
1541 cg, remote_heads = ret
1526 return remote.addchangegroup(cg, 'push', self.url())
1542 return remote.addchangegroup(cg, 'push', self.url())
1527 return ret[1]
1543 return ret[1]
1528 finally:
1544 finally:
1529 del lock
1545 del lock
1530
1546
1531 def push_unbundle(self, remote, force, revs):
1547 def push_unbundle(self, remote, force, revs):
1532 # local repo finds heads on server, finds out what revs it
1548 # local repo finds heads on server, finds out what revs it
1533 # must push. once revs transferred, if server finds it has
1549 # must push. once revs transferred, if server finds it has
1534 # different heads (someone else won commit/push race), server
1550 # different heads (someone else won commit/push race), server
1535 # aborts.
1551 # aborts.
1536
1552
1537 ret = self.prepush(remote, force, revs)
1553 ret = self.prepush(remote, force, revs)
1538 if ret[0] is not None:
1554 if ret[0] is not None:
1539 cg, remote_heads = ret
1555 cg, remote_heads = ret
1540 if force: remote_heads = ['force']
1556 if force: remote_heads = ['force']
1541 return remote.unbundle(cg, remote_heads, 'push')
1557 return remote.unbundle(cg, remote_heads, 'push')
1542 return ret[1]
1558 return ret[1]
1543
1559
1544 def changegroupinfo(self, nodes, source):
1560 def changegroupinfo(self, nodes, source):
1545 if self.ui.verbose or source == 'bundle':
1561 if self.ui.verbose or source == 'bundle':
1546 self.ui.status(_("%d changesets found\n") % len(nodes))
1562 self.ui.status(_("%d changesets found\n") % len(nodes))
1547 if self.ui.debugflag:
1563 if self.ui.debugflag:
1548 self.ui.debug(_("List of changesets:\n"))
1564 self.ui.debug(_("List of changesets:\n"))
1549 for node in nodes:
1565 for node in nodes:
1550 self.ui.debug("%s\n" % hex(node))
1566 self.ui.debug("%s\n" % hex(node))
1551
1567
1552 def changegroupsubset(self, bases, heads, source, extranodes=None):
1568 def changegroupsubset(self, bases, heads, source, extranodes=None):
1553 """This function generates a changegroup consisting of all the nodes
1569 """This function generates a changegroup consisting of all the nodes
1554 that are descendents of any of the bases, and ancestors of any of
1570 that are descendents of any of the bases, and ancestors of any of
1555 the heads.
1571 the heads.
1556
1572
1557 It is fairly complex as determining which filenodes and which
1573 It is fairly complex as determining which filenodes and which
1558 manifest nodes need to be included for the changeset to be complete
1574 manifest nodes need to be included for the changeset to be complete
1559 is non-trivial.
1575 is non-trivial.
1560
1576
1561 Another wrinkle is doing the reverse, figuring out which changeset in
1577 Another wrinkle is doing the reverse, figuring out which changeset in
1562 the changegroup a particular filenode or manifestnode belongs to.
1578 the changegroup a particular filenode or manifestnode belongs to.
1563
1579
1564 The caller can specify some nodes that must be included in the
1580 The caller can specify some nodes that must be included in the
1565 changegroup using the extranodes argument. It should be a dict
1581 changegroup using the extranodes argument. It should be a dict
1566 where the keys are the filenames (or 1 for the manifest), and the
1582 where the keys are the filenames (or 1 for the manifest), and the
1567 values are lists of (node, linknode) tuples, where node is a wanted
1583 values are lists of (node, linknode) tuples, where node is a wanted
1568 node and linknode is the changelog node that should be transmitted as
1584 node and linknode is the changelog node that should be transmitted as
1569 the linkrev.
1585 the linkrev.
1570 """
1586 """
1571
1587
1572 self.hook('preoutgoing', throw=True, source=source)
1588 self.hook('preoutgoing', throw=True, source=source)
1573
1589
1574 # Set up some initial variables
1590 # Set up some initial variables
1575 # Make it easy to refer to self.changelog
1591 # Make it easy to refer to self.changelog
1576 cl = self.changelog
1592 cl = self.changelog
1577 # msng is short for missing - compute the list of changesets in this
1593 # msng is short for missing - compute the list of changesets in this
1578 # changegroup.
1594 # changegroup.
1579 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1595 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1580 self.changegroupinfo(msng_cl_lst, source)
1596 self.changegroupinfo(msng_cl_lst, source)
1581 # Some bases may turn out to be superfluous, and some heads may be
1597 # Some bases may turn out to be superfluous, and some heads may be
1582 # too. nodesbetween will return the minimal set of bases and heads
1598 # too. nodesbetween will return the minimal set of bases and heads
1583 # necessary to re-create the changegroup.
1599 # necessary to re-create the changegroup.
1584
1600
1585 # Known heads are the list of heads that it is assumed the recipient
1601 # Known heads are the list of heads that it is assumed the recipient
1586 # of this changegroup will know about.
1602 # of this changegroup will know about.
1587 knownheads = {}
1603 knownheads = {}
1588 # We assume that all parents of bases are known heads.
1604 # We assume that all parents of bases are known heads.
1589 for n in bases:
1605 for n in bases:
1590 for p in cl.parents(n):
1606 for p in cl.parents(n):
1591 if p != nullid:
1607 if p != nullid:
1592 knownheads[p] = 1
1608 knownheads[p] = 1
1593 knownheads = knownheads.keys()
1609 knownheads = knownheads.keys()
1594 if knownheads:
1610 if knownheads:
1595 # Now that we know what heads are known, we can compute which
1611 # Now that we know what heads are known, we can compute which
1596 # changesets are known. The recipient must know about all
1612 # changesets are known. The recipient must know about all
1597 # changesets required to reach the known heads from the null
1613 # changesets required to reach the known heads from the null
1598 # changeset.
1614 # changeset.
1599 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1615 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1600 junk = None
1616 junk = None
1601 # Transform the list into an ersatz set.
1617 # Transform the list into an ersatz set.
1602 has_cl_set = dict.fromkeys(has_cl_set)
1618 has_cl_set = dict.fromkeys(has_cl_set)
1603 else:
1619 else:
1604 # If there were no known heads, the recipient cannot be assumed to
1620 # If there were no known heads, the recipient cannot be assumed to
1605 # know about any changesets.
1621 # know about any changesets.
1606 has_cl_set = {}
1622 has_cl_set = {}
1607
1623
1608 # Make it easy to refer to self.manifest
1624 # Make it easy to refer to self.manifest
1609 mnfst = self.manifest
1625 mnfst = self.manifest
1610 # We don't know which manifests are missing yet
1626 # We don't know which manifests are missing yet
1611 msng_mnfst_set = {}
1627 msng_mnfst_set = {}
1612 # Nor do we know which filenodes are missing.
1628 # Nor do we know which filenodes are missing.
1613 msng_filenode_set = {}
1629 msng_filenode_set = {}
1614
1630
1615 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1631 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1616 junk = None
1632 junk = None
1617
1633
1618 # A changeset always belongs to itself, so the changenode lookup
1634 # A changeset always belongs to itself, so the changenode lookup
1619 # function for a changenode is identity.
1635 # function for a changenode is identity.
1620 def identity(x):
1636 def identity(x):
1621 return x
1637 return x
1622
1638
1623 # A function generating function. Sets up an environment for the
1639 # A function generating function. Sets up an environment for the
1624 # inner function.
1640 # inner function.
1625 def cmp_by_rev_func(revlog):
1641 def cmp_by_rev_func(revlog):
1626 # Compare two nodes by their revision number in the environment's
1642 # Compare two nodes by their revision number in the environment's
1627 # revision history. Since the revision number both represents the
1643 # revision history. Since the revision number both represents the
1628 # most efficient order to read the nodes in, and represents a
1644 # most efficient order to read the nodes in, and represents a
1629 # topological sorting of the nodes, this function is often useful.
1645 # topological sorting of the nodes, this function is often useful.
1630 def cmp_by_rev(a, b):
1646 def cmp_by_rev(a, b):
1631 return cmp(revlog.rev(a), revlog.rev(b))
1647 return cmp(revlog.rev(a), revlog.rev(b))
1632 return cmp_by_rev
1648 return cmp_by_rev
1633
1649
1634 # If we determine that a particular file or manifest node must be a
1650 # If we determine that a particular file or manifest node must be a
1635 # node that the recipient of the changegroup will already have, we can
1651 # node that the recipient of the changegroup will already have, we can
1636 # also assume the recipient will have all the parents. This function
1652 # also assume the recipient will have all the parents. This function
1637 # prunes them from the set of missing nodes.
1653 # prunes them from the set of missing nodes.
1638 def prune_parents(revlog, hasset, msngset):
1654 def prune_parents(revlog, hasset, msngset):
1639 haslst = hasset.keys()
1655 haslst = hasset.keys()
1640 haslst.sort(cmp_by_rev_func(revlog))
1656 haslst.sort(cmp_by_rev_func(revlog))
1641 for node in haslst:
1657 for node in haslst:
1642 parentlst = [p for p in revlog.parents(node) if p != nullid]
1658 parentlst = [p for p in revlog.parents(node) if p != nullid]
1643 while parentlst:
1659 while parentlst:
1644 n = parentlst.pop()
1660 n = parentlst.pop()
1645 if n not in hasset:
1661 if n not in hasset:
1646 hasset[n] = 1
1662 hasset[n] = 1
1647 p = [p for p in revlog.parents(n) if p != nullid]
1663 p = [p for p in revlog.parents(n) if p != nullid]
1648 parentlst.extend(p)
1664 parentlst.extend(p)
1649 for n in hasset:
1665 for n in hasset:
1650 msngset.pop(n, None)
1666 msngset.pop(n, None)
1651
1667
1652 # This is a function generating function used to set up an environment
1668 # This is a function generating function used to set up an environment
1653 # for the inner function to execute in.
1669 # for the inner function to execute in.
1654 def manifest_and_file_collector(changedfileset):
1670 def manifest_and_file_collector(changedfileset):
1655 # This is an information gathering function that gathers
1671 # This is an information gathering function that gathers
1656 # information from each changeset node that goes out as part of
1672 # information from each changeset node that goes out as part of
1657 # the changegroup. The information gathered is a list of which
1673 # the changegroup. The information gathered is a list of which
1658 # manifest nodes are potentially required (the recipient may
1674 # manifest nodes are potentially required (the recipient may
1659 # already have them) and total list of all files which were
1675 # already have them) and total list of all files which were
1660 # changed in any changeset in the changegroup.
1676 # changed in any changeset in the changegroup.
1661 #
1677 #
1662 # We also remember the first changenode we saw any manifest
1678 # We also remember the first changenode we saw any manifest
1663 # referenced by so we can later determine which changenode 'owns'
1679 # referenced by so we can later determine which changenode 'owns'
1664 # the manifest.
1680 # the manifest.
1665 def collect_manifests_and_files(clnode):
1681 def collect_manifests_and_files(clnode):
1666 c = cl.read(clnode)
1682 c = cl.read(clnode)
1667 for f in c[3]:
1683 for f in c[3]:
1668 # This is to make sure we only have one instance of each
1684 # This is to make sure we only have one instance of each
1669 # filename string for each filename.
1685 # filename string for each filename.
1670 changedfileset.setdefault(f, f)
1686 changedfileset.setdefault(f, f)
1671 msng_mnfst_set.setdefault(c[0], clnode)
1687 msng_mnfst_set.setdefault(c[0], clnode)
1672 return collect_manifests_and_files
1688 return collect_manifests_and_files
1673
1689
1674 # Figure out which manifest nodes (of the ones we think might be part
1690 # Figure out which manifest nodes (of the ones we think might be part
1675 # of the changegroup) the recipient must know about and remove them
1691 # of the changegroup) the recipient must know about and remove them
1676 # from the changegroup.
1692 # from the changegroup.
1677 def prune_manifests():
1693 def prune_manifests():
1678 has_mnfst_set = {}
1694 has_mnfst_set = {}
1679 for n in msng_mnfst_set:
1695 for n in msng_mnfst_set:
1680 # If a 'missing' manifest thinks it belongs to a changenode
1696 # If a 'missing' manifest thinks it belongs to a changenode
1681 # the recipient is assumed to have, obviously the recipient
1697 # the recipient is assumed to have, obviously the recipient
1682 # must have that manifest.
1698 # must have that manifest.
1683 linknode = cl.node(mnfst.linkrev(n))
1699 linknode = cl.node(mnfst.linkrev(n))
1684 if linknode in has_cl_set:
1700 if linknode in has_cl_set:
1685 has_mnfst_set[n] = 1
1701 has_mnfst_set[n] = 1
1686 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1702 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1687
1703
1688 # Use the information collected in collect_manifests_and_files to say
1704 # Use the information collected in collect_manifests_and_files to say
1689 # which changenode any manifestnode belongs to.
1705 # which changenode any manifestnode belongs to.
1690 def lookup_manifest_link(mnfstnode):
1706 def lookup_manifest_link(mnfstnode):
1691 return msng_mnfst_set[mnfstnode]
1707 return msng_mnfst_set[mnfstnode]
1692
1708
1693 # A function generating function that sets up the initial environment
1709 # A function generating function that sets up the initial environment
1694 # the inner function.
1710 # the inner function.
1695 def filenode_collector(changedfiles):
1711 def filenode_collector(changedfiles):
1696 next_rev = [0]
1712 next_rev = [0]
1697 # This gathers information from each manifestnode included in the
1713 # This gathers information from each manifestnode included in the
1698 # changegroup about which filenodes the manifest node references
1714 # changegroup about which filenodes the manifest node references
1699 # so we can include those in the changegroup too.
1715 # so we can include those in the changegroup too.
1700 #
1716 #
1701 # It also remembers which changenode each filenode belongs to. It
1717 # It also remembers which changenode each filenode belongs to. It
1702 # does this by assuming the a filenode belongs to the changenode
1718 # does this by assuming the a filenode belongs to the changenode
1703 # the first manifest that references it belongs to.
1719 # the first manifest that references it belongs to.
1704 def collect_msng_filenodes(mnfstnode):
1720 def collect_msng_filenodes(mnfstnode):
1705 r = mnfst.rev(mnfstnode)
1721 r = mnfst.rev(mnfstnode)
1706 if r == next_rev[0]:
1722 if r == next_rev[0]:
1707 # If the last rev we looked at was the one just previous,
1723 # If the last rev we looked at was the one just previous,
1708 # we only need to see a diff.
1724 # we only need to see a diff.
1709 deltamf = mnfst.readdelta(mnfstnode)
1725 deltamf = mnfst.readdelta(mnfstnode)
1710 # For each line in the delta
1726 # For each line in the delta
1711 for f, fnode in deltamf.items():
1727 for f, fnode in deltamf.items():
1712 f = changedfiles.get(f, None)
1728 f = changedfiles.get(f, None)
1713 # And if the file is in the list of files we care
1729 # And if the file is in the list of files we care
1714 # about.
1730 # about.
1715 if f is not None:
1731 if f is not None:
1716 # Get the changenode this manifest belongs to
1732 # Get the changenode this manifest belongs to
1717 clnode = msng_mnfst_set[mnfstnode]
1733 clnode = msng_mnfst_set[mnfstnode]
1718 # Create the set of filenodes for the file if
1734 # Create the set of filenodes for the file if
1719 # there isn't one already.
1735 # there isn't one already.
1720 ndset = msng_filenode_set.setdefault(f, {})
1736 ndset = msng_filenode_set.setdefault(f, {})
1721 # And set the filenode's changelog node to the
1737 # And set the filenode's changelog node to the
1722 # manifest's if it hasn't been set already.
1738 # manifest's if it hasn't been set already.
1723 ndset.setdefault(fnode, clnode)
1739 ndset.setdefault(fnode, clnode)
1724 else:
1740 else:
1725 # Otherwise we need a full manifest.
1741 # Otherwise we need a full manifest.
1726 m = mnfst.read(mnfstnode)
1742 m = mnfst.read(mnfstnode)
1727 # For every file in we care about.
1743 # For every file in we care about.
1728 for f in changedfiles:
1744 for f in changedfiles:
1729 fnode = m.get(f, None)
1745 fnode = m.get(f, None)
1730 # If it's in the manifest
1746 # If it's in the manifest
1731 if fnode is not None:
1747 if fnode is not None:
1732 # See comments above.
1748 # See comments above.
1733 clnode = msng_mnfst_set[mnfstnode]
1749 clnode = msng_mnfst_set[mnfstnode]
1734 ndset = msng_filenode_set.setdefault(f, {})
1750 ndset = msng_filenode_set.setdefault(f, {})
1735 ndset.setdefault(fnode, clnode)
1751 ndset.setdefault(fnode, clnode)
1736 # Remember the revision we hope to see next.
1752 # Remember the revision we hope to see next.
1737 next_rev[0] = r + 1
1753 next_rev[0] = r + 1
1738 return collect_msng_filenodes
1754 return collect_msng_filenodes
1739
1755
1740 # We have a list of filenodes we think we need for a file, lets remove
1756 # We have a list of filenodes we think we need for a file, lets remove
1741 # all those we now the recipient must have.
1757 # all those we now the recipient must have.
1742 def prune_filenodes(f, filerevlog):
1758 def prune_filenodes(f, filerevlog):
1743 msngset = msng_filenode_set[f]
1759 msngset = msng_filenode_set[f]
1744 hasset = {}
1760 hasset = {}
1745 # If a 'missing' filenode thinks it belongs to a changenode we
1761 # If a 'missing' filenode thinks it belongs to a changenode we
1746 # assume the recipient must have, then the recipient must have
1762 # assume the recipient must have, then the recipient must have
1747 # that filenode.
1763 # that filenode.
1748 for n in msngset:
1764 for n in msngset:
1749 clnode = cl.node(filerevlog.linkrev(n))
1765 clnode = cl.node(filerevlog.linkrev(n))
1750 if clnode in has_cl_set:
1766 if clnode in has_cl_set:
1751 hasset[n] = 1
1767 hasset[n] = 1
1752 prune_parents(filerevlog, hasset, msngset)
1768 prune_parents(filerevlog, hasset, msngset)
1753
1769
1754 # A function generator function that sets up the a context for the
1770 # A function generator function that sets up the a context for the
1755 # inner function.
1771 # inner function.
1756 def lookup_filenode_link_func(fname):
1772 def lookup_filenode_link_func(fname):
1757 msngset = msng_filenode_set[fname]
1773 msngset = msng_filenode_set[fname]
1758 # Lookup the changenode the filenode belongs to.
1774 # Lookup the changenode the filenode belongs to.
1759 def lookup_filenode_link(fnode):
1775 def lookup_filenode_link(fnode):
1760 return msngset[fnode]
1776 return msngset[fnode]
1761 return lookup_filenode_link
1777 return lookup_filenode_link
1762
1778
1763 # Add the nodes that were explicitly requested.
1779 # Add the nodes that were explicitly requested.
1764 def add_extra_nodes(name, nodes):
1780 def add_extra_nodes(name, nodes):
1765 if not extranodes or name not in extranodes:
1781 if not extranodes or name not in extranodes:
1766 return
1782 return
1767
1783
1768 for node, linknode in extranodes[name]:
1784 for node, linknode in extranodes[name]:
1769 if node not in nodes:
1785 if node not in nodes:
1770 nodes[node] = linknode
1786 nodes[node] = linknode
1771
1787
        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty; the changelog group
            # callback (manifest_and_file_collector) fills it in as a side
            # effect while the changelog chunks are streamed out.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            # Manifest extra nodes are filed under the integer key 1, which
            # cannot collide with a (string) file name; see the isinstance
            # check on extranodes keys below.
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    # skip the manifest entry keyed by the int 1 above
                    if isinstance(fname, int):
                        continue
                    add_extra_nodes(fname,
                                    msng_filenode_set.setdefault(fname, {}))
                    changedfiles[fname] = 1
            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                if filerevlog.count() == 0:
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if fname in msng_filenode_set:
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if fname in msng_filenode_set:
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()
1843
1859
1844 if msng_cl_lst:
1860 if msng_cl_lst:
1845 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1861 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1846
1862
1847 return util.chunkbuffer(gengroup())
1863 return util.chunkbuffer(gengroup())
1848
1864
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them."""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        # every changeset that descends from one of the requested bases
        nodes = cl.nodesbetween(basenodes, None)[0]
        # set of outgoing changelog revision numbers, for fast membership
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            # changelog nodes are their own link nodes
            return x

        def gennodelst(revlog):
            # Yield the nodes of 'revlog' whose linked changeset is part of
            # the outgoing set, in revision order.
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            # Side-effect callback passed to cl.group(): records every file
            # name listed by each outgoing changeset into changedfileset.
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            # Map a node of 'revlog' back to its owning changelog node.
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            # changelog chunks first; the collector callback fills in
            # changedfiles while we stream
            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            # then the manifest chunks
            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            # finally one group per changed file, each introduced by a
            # chunk carrying the file name
            for fname in changedfiles:
                filerevlog = self.file(fname)
                if filerevlog.count() == 0:
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            # signal that no more groups are left
            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1918
1934
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - less heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            # link callback for the changelog group: the next changeset
            # will be stored at the current count
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        def revmap(x):
            # link callback for manifest/file groups: map a changelog
            # node to its revision number
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            cor = cl.count() - 1  # highest revision before the pull
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp, 1) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1  # highest revision after the pull
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                # a zero-length chunk terminates the list of file groups
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            # make changelog see real files again
            cl.finalize(trp)

            newheads = len(self.changelog.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                # give hooks a chance to veto before the transaction commits
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(self.changelog.node(cor+1)), source=srctype,
                          url=url)

            tr.close()
        finally:
            # break the reference cycle so the transaction's destructor
            # (which rolls back on abort) can run promptly
            del tr

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug(_("updating the branch cache\n"))
            self.branchtags()
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            # one 'incoming' hook call per new changeset
            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
2022
2037
2023
2038
2024 def stream_in(self, remote):
2039 def stream_in(self, remote):
2025 fp = remote.stream_out()
2040 fp = remote.stream_out()
2026 l = fp.readline()
2041 l = fp.readline()
2027 try:
2042 try:
2028 resp = int(l)
2043 resp = int(l)
2029 except ValueError:
2044 except ValueError:
2030 raise util.UnexpectedOutput(
2045 raise util.UnexpectedOutput(
2031 _('Unexpected response from remote server:'), l)
2046 _('Unexpected response from remote server:'), l)
2032 if resp == 1:
2047 if resp == 1:
2033 raise util.Abort(_('operation forbidden by server'))
2048 raise util.Abort(_('operation forbidden by server'))
2034 elif resp == 2:
2049 elif resp == 2:
2035 raise util.Abort(_('locking the remote repository failed'))
2050 raise util.Abort(_('locking the remote repository failed'))
2036 elif resp != 0:
2051 elif resp != 0:
2037 raise util.Abort(_('the server sent an unknown error code'))
2052 raise util.Abort(_('the server sent an unknown error code'))
2038 self.ui.status(_('streaming all changes\n'))
2053 self.ui.status(_('streaming all changes\n'))
2039 l = fp.readline()
2054 l = fp.readline()
2040 try:
2055 try:
2041 total_files, total_bytes = map(int, l.split(' ', 1))
2056 total_files, total_bytes = map(int, l.split(' ', 1))
2042 except ValueError, TypeError:
2057 except ValueError, TypeError:
2043 raise util.UnexpectedOutput(
2058 raise util.UnexpectedOutput(
2044 _('Unexpected response from remote server:'), l)
2059 _('Unexpected response from remote server:'), l)
2045 self.ui.status(_('%d files to transfer, %s of data\n') %
2060 self.ui.status(_('%d files to transfer, %s of data\n') %
2046 (total_files, util.bytecount(total_bytes)))
2061 (total_files, util.bytecount(total_bytes)))
2047 start = time.time()
2062 start = time.time()
2048 for i in xrange(total_files):
2063 for i in xrange(total_files):
2049 # XXX doesn't support '\n' or '\r' in filenames
2064 # XXX doesn't support '\n' or '\r' in filenames
2050 l = fp.readline()
2065 l = fp.readline()
2051 try:
2066 try:
2052 name, size = l.split('\0', 1)
2067 name, size = l.split('\0', 1)
2053 size = int(size)
2068 size = int(size)
2054 except ValueError, TypeError:
2069 except ValueError, TypeError:
2055 raise util.UnexpectedOutput(
2070 raise util.UnexpectedOutput(
2056 _('Unexpected response from remote server:'), l)
2071 _('Unexpected response from remote server:'), l)
2057 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2072 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2058 ofp = self.sopener(name, 'w')
2073 ofp = self.sopener(name, 'w')
2059 for chunk in util.filechunkiter(fp, limit=size):
2074 for chunk in util.filechunkiter(fp, limit=size):
2060 ofp.write(chunk)
2075 ofp.write(chunk)
2061 ofp.close()
2076 ofp.close()
2062 elapsed = time.time() - start
2077 elapsed = time.time() - start
2063 if elapsed <= 0:
2078 if elapsed <= 0:
2064 elapsed = 0.001
2079 elapsed = 0.001
2065 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2080 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2066 (util.bytecount(total_bytes), elapsed,
2081 (util.bytecount(total_bytes), elapsed,
2067 util.bytecount(total_bytes / elapsed)))
2082 util.bytecount(total_bytes / elapsed)))
2068 self.invalidate()
2083 self.invalidate()
2069 return len(self.heads()) + 1
2084 return len(self.heads()) + 1
2070
2085
2071 def clone(self, remote, heads=[], stream=False):
2086 def clone(self, remote, heads=[], stream=False):
2072 '''clone remote repository.
2087 '''clone remote repository.
2073
2088
2074 keyword arguments:
2089 keyword arguments:
2075 heads: list of revs to clone (forces use of pull)
2090 heads: list of revs to clone (forces use of pull)
2076 stream: use streaming clone if possible'''
2091 stream: use streaming clone if possible'''
2077
2092
2078 # now, all clients that can request uncompressed clones can
2093 # now, all clients that can request uncompressed clones can
2079 # read repo formats supported by all servers that can serve
2094 # read repo formats supported by all servers that can serve
2080 # them.
2095 # them.
2081
2096
2082 # if revlog format changes, client will have to check version
2097 # if revlog format changes, client will have to check version
2083 # and format flags on "stream" capability, and use
2098 # and format flags on "stream" capability, and use
2084 # uncompressed only if compatible.
2099 # uncompressed only if compatible.
2085
2100
2086 if stream and not heads and remote.capable('stream'):
2101 if stream and not heads and remote.capable('stream'):
2087 return self.stream_in(remote)
2102 return self.stream_in(remote)
2088 return self.pull(remote, heads)
2103 return self.pull(remote, heads)
2089
2104
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that performs the queued renames in 'files'.

    The (src, dest) pairs are snapshotted into fresh tuples so the
    returned closure holds no reference back to the caller's objects,
    avoiding reference cycles that would keep destructors from running.
    """
    pending = [tuple(pair) for pair in files]

    def a():
        for source, target in pending:
            util.rename(source, target)

    return a
2097
2112
def instance(ui, path, create):
    """Open (or, if 'create' is set, create) the local repository at 'path',
    stripping any leading 'file:' scheme first."""
    local_path = util.drop_scheme('file', path)
    return localrepository(ui, local_path, create)
2100
2115
def islocal(path):
    """Repositories of this type are always directly accessible."""
    return True
General Comments 0
You need to be logged in to leave comments. Login now