##// END OF EJS Templates
Merge with crew-stable
Brendan Cully -
r4182:ba51a822 merge default
parent child Browse files
Show More
@@ -1,2222 +1,2227 b''
# queue.py - patch queues for mercurial
#
# Copyright 2005, 2006 Chris Mason <mason@suse.com>
#
# This software may be used and distributed according to the terms
# of the GNU General Public License, incorporated herein by reference.

'''patch management and development

This extension lets you work with a stack of patches in a Mercurial
repository. It manages two stacks of patches - all known patches, and
applied patches (subset of known patches).

Known patches are represented as patch files in the .hg/patches
directory. Applied patches are both patch files and changesets.

Common tasks (use "hg help command" for more details):

prepare repository to work with patches   qinit
create new patch                          qnew
import existing patch                     qimport

print patch series                        qseries
print applied patches                     qapplied
print name of top applied patch           qtop

add known patch to applied stack          qpush
remove patch from applied stack           qpop
refresh contents of top applied patch     qrefresh
'''

from mercurial.i18n import _
from mercurial import commands, cmdutil, hg, patch, revlog, util, changegroup
import os, sys, re, errno

# These commands can run without a local repository.
commands.norepo += " qclone qversion"

# Patch names look like unix-file names.
# They must be joinable with queue directory and result in the patch path.
normname = util.normpath
class statusentry:
    '''One entry of the status file: the changeset hash and patch name of
    an applied patch, serialized as "<rev>:<name>".'''

    def __init__(self, rev, name=None):
        '''Build from either a serialized "rev:name" string (when name is
        omitted/empty) or an explicit (rev, name) pair.

        A serialized string lacking the ":" separator is a malformed
        status line; both fields are then set to None so callers can
        detect it.'''
        if not name:
            # split on the first ':' only - patch names may contain ':'
            fields = rev.split(':', 1)
            if len(fields) == 2:
                self.rev, self.name = fields
            else:
                self.rev, self.name = None, None
        else:
            self.rev, self.name = rev, name

    def __str__(self):
        '''Serialize back to the "rev:name" status-file format.'''
        return self.rev + ':' + self.name
55
55
56 class queue:
56 class queue:
57 def __init__(self, ui, path, patchdir=None):
57 def __init__(self, ui, path, patchdir=None):
58 self.basepath = path
58 self.basepath = path
59 self.path = patchdir or os.path.join(path, "patches")
59 self.path = patchdir or os.path.join(path, "patches")
60 self.opener = util.opener(self.path)
60 self.opener = util.opener(self.path)
61 self.ui = ui
61 self.ui = ui
62 self.applied = []
62 self.applied = []
63 self.full_series = []
63 self.full_series = []
64 self.applied_dirty = 0
64 self.applied_dirty = 0
65 self.series_dirty = 0
65 self.series_dirty = 0
66 self.series_path = "series"
66 self.series_path = "series"
67 self.status_path = "status"
67 self.status_path = "status"
68 self.guards_path = "guards"
68 self.guards_path = "guards"
69 self.active_guards = None
69 self.active_guards = None
70 self.guards_dirty = False
70 self.guards_dirty = False
71 self._diffopts = None
71 self._diffopts = None
72
72
73 if os.path.exists(self.join(self.series_path)):
73 if os.path.exists(self.join(self.series_path)):
74 self.full_series = self.opener(self.series_path).read().splitlines()
74 self.full_series = self.opener(self.series_path).read().splitlines()
75 self.parse_series()
75 self.parse_series()
76
76
77 if os.path.exists(self.join(self.status_path)):
77 if os.path.exists(self.join(self.status_path)):
78 lines = self.opener(self.status_path).read().splitlines()
78 lines = self.opener(self.status_path).read().splitlines()
79 self.applied = [statusentry(l) for l in lines]
79 self.applied = [statusentry(l) for l in lines]
80
80
81 def diffopts(self):
81 def diffopts(self):
82 if self._diffopts is None:
82 if self._diffopts is None:
83 self._diffopts = patch.diffopts(self.ui)
83 self._diffopts = patch.diffopts(self.ui)
84 return self._diffopts
84 return self._diffopts
85
85
86 def join(self, *p):
86 def join(self, *p):
87 return os.path.join(self.path, *p)
87 return os.path.join(self.path, *p)
88
88
89 def find_series(self, patch):
89 def find_series(self, patch):
90 pre = re.compile("(\s*)([^#]+)")
90 pre = re.compile("(\s*)([^#]+)")
91 index = 0
91 index = 0
92 for l in self.full_series:
92 for l in self.full_series:
93 m = pre.match(l)
93 m = pre.match(l)
94 if m:
94 if m:
95 s = m.group(2)
95 s = m.group(2)
96 s = s.rstrip()
96 s = s.rstrip()
97 if s == patch:
97 if s == patch:
98 return index
98 return index
99 index += 1
99 index += 1
100 return None
100 return None
101
101
102 guard_re = re.compile(r'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')
102 guard_re = re.compile(r'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')
103
103
104 def parse_series(self):
104 def parse_series(self):
105 self.series = []
105 self.series = []
106 self.series_guards = []
106 self.series_guards = []
107 for l in self.full_series:
107 for l in self.full_series:
108 h = l.find('#')
108 h = l.find('#')
109 if h == -1:
109 if h == -1:
110 patch = l
110 patch = l
111 comment = ''
111 comment = ''
112 elif h == 0:
112 elif h == 0:
113 continue
113 continue
114 else:
114 else:
115 patch = l[:h]
115 patch = l[:h]
116 comment = l[h:]
116 comment = l[h:]
117 patch = patch.strip()
117 patch = patch.strip()
118 if patch:
118 if patch:
119 if patch in self.series:
119 if patch in self.series:
120 raise util.Abort(_('%s appears more than once in %s') %
120 raise util.Abort(_('%s appears more than once in %s') %
121 (patch, self.join(self.series_path)))
121 (patch, self.join(self.series_path)))
122 self.series.append(patch)
122 self.series.append(patch)
123 self.series_guards.append(self.guard_re.findall(comment))
123 self.series_guards.append(self.guard_re.findall(comment))
124
124
125 def check_guard(self, guard):
125 def check_guard(self, guard):
126 bad_chars = '# \t\r\n\f'
126 bad_chars = '# \t\r\n\f'
127 first = guard[0]
127 first = guard[0]
128 for c in '-+':
128 for c in '-+':
129 if first == c:
129 if first == c:
130 return (_('guard %r starts with invalid character: %r') %
130 return (_('guard %r starts with invalid character: %r') %
131 (guard, c))
131 (guard, c))
132 for c in bad_chars:
132 for c in bad_chars:
133 if c in guard:
133 if c in guard:
134 return _('invalid character in guard %r: %r') % (guard, c)
134 return _('invalid character in guard %r: %r') % (guard, c)
135
135
136 def set_active(self, guards):
136 def set_active(self, guards):
137 for guard in guards:
137 for guard in guards:
138 bad = self.check_guard(guard)
138 bad = self.check_guard(guard)
139 if bad:
139 if bad:
140 raise util.Abort(bad)
140 raise util.Abort(bad)
141 guards = dict.fromkeys(guards).keys()
141 guards = dict.fromkeys(guards).keys()
142 guards.sort()
142 guards.sort()
143 self.ui.debug('active guards: %s\n' % ' '.join(guards))
143 self.ui.debug('active guards: %s\n' % ' '.join(guards))
144 self.active_guards = guards
144 self.active_guards = guards
145 self.guards_dirty = True
145 self.guards_dirty = True
146
146
147 def active(self):
147 def active(self):
148 if self.active_guards is None:
148 if self.active_guards is None:
149 self.active_guards = []
149 self.active_guards = []
150 try:
150 try:
151 guards = self.opener(self.guards_path).read().split()
151 guards = self.opener(self.guards_path).read().split()
152 except IOError, err:
152 except IOError, err:
153 if err.errno != errno.ENOENT: raise
153 if err.errno != errno.ENOENT: raise
154 guards = []
154 guards = []
155 for i, guard in enumerate(guards):
155 for i, guard in enumerate(guards):
156 bad = self.check_guard(guard)
156 bad = self.check_guard(guard)
157 if bad:
157 if bad:
158 self.ui.warn('%s:%d: %s\n' %
158 self.ui.warn('%s:%d: %s\n' %
159 (self.join(self.guards_path), i + 1, bad))
159 (self.join(self.guards_path), i + 1, bad))
160 else:
160 else:
161 self.active_guards.append(guard)
161 self.active_guards.append(guard)
162 return self.active_guards
162 return self.active_guards
163
163
164 def set_guards(self, idx, guards):
164 def set_guards(self, idx, guards):
165 for g in guards:
165 for g in guards:
166 if len(g) < 2:
166 if len(g) < 2:
167 raise util.Abort(_('guard %r too short') % g)
167 raise util.Abort(_('guard %r too short') % g)
168 if g[0] not in '-+':
168 if g[0] not in '-+':
169 raise util.Abort(_('guard %r starts with invalid char') % g)
169 raise util.Abort(_('guard %r starts with invalid char') % g)
170 bad = self.check_guard(g[1:])
170 bad = self.check_guard(g[1:])
171 if bad:
171 if bad:
172 raise util.Abort(bad)
172 raise util.Abort(bad)
173 drop = self.guard_re.sub('', self.full_series[idx])
173 drop = self.guard_re.sub('', self.full_series[idx])
174 self.full_series[idx] = drop + ''.join([' #' + g for g in guards])
174 self.full_series[idx] = drop + ''.join([' #' + g for g in guards])
175 self.parse_series()
175 self.parse_series()
176 self.series_dirty = True
176 self.series_dirty = True
177
177
178 def pushable(self, idx):
178 def pushable(self, idx):
179 if isinstance(idx, str):
179 if isinstance(idx, str):
180 idx = self.series.index(idx)
180 idx = self.series.index(idx)
181 patchguards = self.series_guards[idx]
181 patchguards = self.series_guards[idx]
182 if not patchguards:
182 if not patchguards:
183 return True, None
183 return True, None
184 default = False
184 default = False
185 guards = self.active()
185 guards = self.active()
186 exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
186 exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
187 if exactneg:
187 if exactneg:
188 return False, exactneg[0]
188 return False, exactneg[0]
189 pos = [g for g in patchguards if g[0] == '+']
189 pos = [g for g in patchguards if g[0] == '+']
190 exactpos = [g for g in pos if g[1:] in guards]
190 exactpos = [g for g in pos if g[1:] in guards]
191 if pos:
191 if pos:
192 if exactpos:
192 if exactpos:
193 return True, exactpos[0]
193 return True, exactpos[0]
194 return False, pos
194 return False, pos
195 return True, ''
195 return True, ''
196
196
197 def explain_pushable(self, idx, all_patches=False):
197 def explain_pushable(self, idx, all_patches=False):
198 write = all_patches and self.ui.write or self.ui.warn
198 write = all_patches and self.ui.write or self.ui.warn
199 if all_patches or self.ui.verbose:
199 if all_patches or self.ui.verbose:
200 if isinstance(idx, str):
200 if isinstance(idx, str):
201 idx = self.series.index(idx)
201 idx = self.series.index(idx)
202 pushable, why = self.pushable(idx)
202 pushable, why = self.pushable(idx)
203 if all_patches and pushable:
203 if all_patches and pushable:
204 if why is None:
204 if why is None:
205 write(_('allowing %s - no guards in effect\n') %
205 write(_('allowing %s - no guards in effect\n') %
206 self.series[idx])
206 self.series[idx])
207 else:
207 else:
208 if not why:
208 if not why:
209 write(_('allowing %s - no matching negative guards\n') %
209 write(_('allowing %s - no matching negative guards\n') %
210 self.series[idx])
210 self.series[idx])
211 else:
211 else:
212 write(_('allowing %s - guarded by %r\n') %
212 write(_('allowing %s - guarded by %r\n') %
213 (self.series[idx], why))
213 (self.series[idx], why))
214 if not pushable:
214 if not pushable:
215 if why:
215 if why:
216 write(_('skipping %s - guarded by %r\n') %
216 write(_('skipping %s - guarded by %r\n') %
217 (self.series[idx], why))
217 (self.series[idx], why))
218 else:
218 else:
219 write(_('skipping %s - no matching guards\n') %
219 write(_('skipping %s - no matching guards\n') %
220 self.series[idx])
220 self.series[idx])
221
221
222 def save_dirty(self):
222 def save_dirty(self):
223 def write_list(items, path):
223 def write_list(items, path):
224 fp = self.opener(path, 'w')
224 fp = self.opener(path, 'w')
225 for i in items:
225 for i in items:
226 print >> fp, i
226 print >> fp, i
227 fp.close()
227 fp.close()
228 if self.applied_dirty: write_list(map(str, self.applied), self.status_path)
228 if self.applied_dirty: write_list(map(str, self.applied), self.status_path)
229 if self.series_dirty: write_list(self.full_series, self.series_path)
229 if self.series_dirty: write_list(self.full_series, self.series_path)
230 if self.guards_dirty: write_list(self.active_guards, self.guards_path)
230 if self.guards_dirty: write_list(self.active_guards, self.guards_path)
231
231
232 def readheaders(self, patch):
232 def readheaders(self, patch):
233 def eatdiff(lines):
233 def eatdiff(lines):
234 while lines:
234 while lines:
235 l = lines[-1]
235 l = lines[-1]
236 if (l.startswith("diff -") or
236 if (l.startswith("diff -") or
237 l.startswith("Index:") or
237 l.startswith("Index:") or
238 l.startswith("===========")):
238 l.startswith("===========")):
239 del lines[-1]
239 del lines[-1]
240 else:
240 else:
241 break
241 break
242 def eatempty(lines):
242 def eatempty(lines):
243 while lines:
243 while lines:
244 l = lines[-1]
244 l = lines[-1]
245 if re.match('\s*$', l):
245 if re.match('\s*$', l):
246 del lines[-1]
246 del lines[-1]
247 else:
247 else:
248 break
248 break
249
249
250 pf = self.join(patch)
250 pf = self.join(patch)
251 message = []
251 message = []
252 comments = []
252 comments = []
253 user = None
253 user = None
254 date = None
254 date = None
255 format = None
255 format = None
256 subject = None
256 subject = None
257 diffstart = 0
257 diffstart = 0
258
258
259 for line in file(pf):
259 for line in file(pf):
260 line = line.rstrip()
260 line = line.rstrip()
261 if line.startswith('diff --git'):
261 if line.startswith('diff --git'):
262 diffstart = 2
262 diffstart = 2
263 break
263 break
264 if diffstart:
264 if diffstart:
265 if line.startswith('+++ '):
265 if line.startswith('+++ '):
266 diffstart = 2
266 diffstart = 2
267 break
267 break
268 if line.startswith("--- "):
268 if line.startswith("--- "):
269 diffstart = 1
269 diffstart = 1
270 continue
270 continue
271 elif format == "hgpatch":
271 elif format == "hgpatch":
272 # parse values when importing the result of an hg export
272 # parse values when importing the result of an hg export
273 if line.startswith("# User "):
273 if line.startswith("# User "):
274 user = line[7:]
274 user = line[7:]
275 elif line.startswith("# Date "):
275 elif line.startswith("# Date "):
276 date = line[7:]
276 date = line[7:]
277 elif not line.startswith("# ") and line:
277 elif not line.startswith("# ") and line:
278 message.append(line)
278 message.append(line)
279 format = None
279 format = None
280 elif line == '# HG changeset patch':
280 elif line == '# HG changeset patch':
281 format = "hgpatch"
281 format = "hgpatch"
282 elif (format != "tagdone" and (line.startswith("Subject: ") or
282 elif (format != "tagdone" and (line.startswith("Subject: ") or
283 line.startswith("subject: "))):
283 line.startswith("subject: "))):
284 subject = line[9:]
284 subject = line[9:]
285 format = "tag"
285 format = "tag"
286 elif (format != "tagdone" and (line.startswith("From: ") or
286 elif (format != "tagdone" and (line.startswith("From: ") or
287 line.startswith("from: "))):
287 line.startswith("from: "))):
288 user = line[6:]
288 user = line[6:]
289 format = "tag"
289 format = "tag"
290 elif format == "tag" and line == "":
290 elif format == "tag" and line == "":
291 # when looking for tags (subject: from: etc) they
291 # when looking for tags (subject: from: etc) they
292 # end once you find a blank line in the source
292 # end once you find a blank line in the source
293 format = "tagdone"
293 format = "tagdone"
294 elif message or line:
294 elif message or line:
295 message.append(line)
295 message.append(line)
296 comments.append(line)
296 comments.append(line)
297
297
298 eatdiff(message)
298 eatdiff(message)
299 eatdiff(comments)
299 eatdiff(comments)
300 eatempty(message)
300 eatempty(message)
301 eatempty(comments)
301 eatempty(comments)
302
302
303 # make sure message isn't empty
303 # make sure message isn't empty
304 if format and format.startswith("tag") and subject:
304 if format and format.startswith("tag") and subject:
305 message.insert(0, "")
305 message.insert(0, "")
306 message.insert(0, subject)
306 message.insert(0, subject)
307 return (message, comments, user, date, diffstart > 1)
307 return (message, comments, user, date, diffstart > 1)
308
308
309 def printdiff(self, repo, node1, node2=None, files=None,
309 def printdiff(self, repo, node1, node2=None, files=None,
310 fp=None, changes=None, opts={}):
310 fp=None, changes=None, opts={}):
311 fns, matchfn, anypats = cmdutil.matchpats(repo, files, opts)
311 fns, matchfn, anypats = cmdutil.matchpats(repo, files, opts)
312
312
313 patch.diff(repo, node1, node2, fns, match=matchfn,
313 patch.diff(repo, node1, node2, fns, match=matchfn,
314 fp=fp, changes=changes, opts=self.diffopts())
314 fp=fp, changes=changes, opts=self.diffopts())
315
315
316 def mergeone(self, repo, mergeq, head, patch, rev, wlock):
316 def mergeone(self, repo, mergeq, head, patch, rev, wlock):
317 # first try just applying the patch
317 # first try just applying the patch
318 (err, n) = self.apply(repo, [ patch ], update_status=False,
318 (err, n) = self.apply(repo, [ patch ], update_status=False,
319 strict=True, merge=rev, wlock=wlock)
319 strict=True, merge=rev, wlock=wlock)
320
320
321 if err == 0:
321 if err == 0:
322 return (err, n)
322 return (err, n)
323
323
324 if n is None:
324 if n is None:
325 raise util.Abort(_("apply failed for patch %s") % patch)
325 raise util.Abort(_("apply failed for patch %s") % patch)
326
326
327 self.ui.warn("patch didn't work out, merging %s\n" % patch)
327 self.ui.warn("patch didn't work out, merging %s\n" % patch)
328
328
329 # apply failed, strip away that rev and merge.
329 # apply failed, strip away that rev and merge.
330 hg.clean(repo, head, wlock=wlock)
330 hg.clean(repo, head, wlock=wlock)
331 self.strip(repo, n, update=False, backup='strip', wlock=wlock)
331 self.strip(repo, n, update=False, backup='strip', wlock=wlock)
332
332
333 ctx = repo.changectx(rev)
333 ctx = repo.changectx(rev)
334 ret = hg.merge(repo, rev, wlock=wlock)
334 ret = hg.merge(repo, rev, wlock=wlock)
335 if ret:
335 if ret:
336 raise util.Abort(_("update returned %d") % ret)
336 raise util.Abort(_("update returned %d") % ret)
337 n = repo.commit(None, ctx.description(), ctx.user(),
337 n = repo.commit(None, ctx.description(), ctx.user(),
338 force=1, wlock=wlock)
338 force=1, wlock=wlock)
339 if n == None:
339 if n == None:
340 raise util.Abort(_("repo commit failed"))
340 raise util.Abort(_("repo commit failed"))
341 try:
341 try:
342 message, comments, user, date, patchfound = mergeq.readheaders(patch)
342 message, comments, user, date, patchfound = mergeq.readheaders(patch)
343 except:
343 except:
344 raise util.Abort(_("unable to read %s") % patch)
344 raise util.Abort(_("unable to read %s") % patch)
345
345
346 patchf = self.opener(patch, "w")
346 patchf = self.opener(patch, "w")
347 if comments:
347 if comments:
348 comments = "\n".join(comments) + '\n\n'
348 comments = "\n".join(comments) + '\n\n'
349 patchf.write(comments)
349 patchf.write(comments)
350 self.printdiff(repo, head, n, fp=patchf)
350 self.printdiff(repo, head, n, fp=patchf)
351 patchf.close()
351 patchf.close()
352 return (0, n)
352 return (0, n)
353
353
354 def qparents(self, repo, rev=None):
354 def qparents(self, repo, rev=None):
355 if rev is None:
355 if rev is None:
356 (p1, p2) = repo.dirstate.parents()
356 (p1, p2) = repo.dirstate.parents()
357 if p2 == revlog.nullid:
357 if p2 == revlog.nullid:
358 return p1
358 return p1
359 if len(self.applied) == 0:
359 if len(self.applied) == 0:
360 return None
360 return None
361 return revlog.bin(self.applied[-1].rev)
361 return revlog.bin(self.applied[-1].rev)
362 pp = repo.changelog.parents(rev)
362 pp = repo.changelog.parents(rev)
363 if pp[1] != revlog.nullid:
363 if pp[1] != revlog.nullid:
364 arevs = [ x.rev for x in self.applied ]
364 arevs = [ x.rev for x in self.applied ]
365 p0 = revlog.hex(pp[0])
365 p0 = revlog.hex(pp[0])
366 p1 = revlog.hex(pp[1])
366 p1 = revlog.hex(pp[1])
367 if p0 in arevs:
367 if p0 in arevs:
368 return pp[0]
368 return pp[0]
369 if p1 in arevs:
369 if p1 in arevs:
370 return pp[1]
370 return pp[1]
371 return pp[0]
371 return pp[0]
372
372
373 def mergepatch(self, repo, mergeq, series, wlock):
373 def mergepatch(self, repo, mergeq, series, wlock):
374 if len(self.applied) == 0:
374 if len(self.applied) == 0:
375 # each of the patches merged in will have two parents. This
375 # each of the patches merged in will have two parents. This
376 # can confuse the qrefresh, qdiff, and strip code because it
376 # can confuse the qrefresh, qdiff, and strip code because it
377 # needs to know which parent is actually in the patch queue.
377 # needs to know which parent is actually in the patch queue.
378 # so, we insert a merge marker with only one parent. This way
378 # so, we insert a merge marker with only one parent. This way
379 # the first patch in the queue is never a merge patch
379 # the first patch in the queue is never a merge patch
380 #
380 #
381 pname = ".hg.patches.merge.marker"
381 pname = ".hg.patches.merge.marker"
382 n = repo.commit(None, '[mq]: merge marker', user=None, force=1,
382 n = repo.commit(None, '[mq]: merge marker', user=None, force=1,
383 wlock=wlock)
383 wlock=wlock)
384 self.applied.append(statusentry(revlog.hex(n), pname))
384 self.applied.append(statusentry(revlog.hex(n), pname))
385 self.applied_dirty = 1
385 self.applied_dirty = 1
386
386
387 head = self.qparents(repo)
387 head = self.qparents(repo)
388
388
389 for patch in series:
389 for patch in series:
390 patch = mergeq.lookup(patch, strict=True)
390 patch = mergeq.lookup(patch, strict=True)
391 if not patch:
391 if not patch:
392 self.ui.warn("patch %s does not exist\n" % patch)
392 self.ui.warn("patch %s does not exist\n" % patch)
393 return (1, None)
393 return (1, None)
394 pushable, reason = self.pushable(patch)
394 pushable, reason = self.pushable(patch)
395 if not pushable:
395 if not pushable:
396 self.explain_pushable(patch, all_patches=True)
396 self.explain_pushable(patch, all_patches=True)
397 continue
397 continue
398 info = mergeq.isapplied(patch)
398 info = mergeq.isapplied(patch)
399 if not info:
399 if not info:
400 self.ui.warn("patch %s is not applied\n" % patch)
400 self.ui.warn("patch %s is not applied\n" % patch)
401 return (1, None)
401 return (1, None)
402 rev = revlog.bin(info[1])
402 rev = revlog.bin(info[1])
403 (err, head) = self.mergeone(repo, mergeq, head, patch, rev, wlock)
403 (err, head) = self.mergeone(repo, mergeq, head, patch, rev, wlock)
404 if head:
404 if head:
405 self.applied.append(statusentry(revlog.hex(head), patch))
405 self.applied.append(statusentry(revlog.hex(head), patch))
406 self.applied_dirty = 1
406 self.applied_dirty = 1
407 if err:
407 if err:
408 return (err, head)
408 return (err, head)
409 return (0, head)
409 return (0, head)
410
410
411 def patch(self, repo, patchfile):
411 def patch(self, repo, patchfile):
412 '''Apply patchfile to the working directory.
412 '''Apply patchfile to the working directory.
413 patchfile: file name of patch'''
413 patchfile: file name of patch'''
414 files = {}
414 files = {}
415 try:
415 try:
416 fuzz = patch.patch(patchfile, self.ui, strip=1, cwd=repo.root,
416 fuzz = patch.patch(patchfile, self.ui, strip=1, cwd=repo.root,
417 files=files)
417 files=files)
418 except Exception, inst:
418 except Exception, inst:
419 self.ui.note(str(inst) + '\n')
419 self.ui.note(str(inst) + '\n')
420 if not self.ui.verbose:
420 if not self.ui.verbose:
421 self.ui.warn("patch failed, unable to continue (try -v)\n")
421 self.ui.warn("patch failed, unable to continue (try -v)\n")
422 return (False, files, False)
422 return (False, files, False)
423
423
424 return (True, files, fuzz)
424 return (True, files, fuzz)
425
425
426 def apply(self, repo, series, list=False, update_status=True,
426 def apply(self, repo, series, list=False, update_status=True,
427 strict=False, patchdir=None, merge=None, wlock=None):
427 strict=False, patchdir=None, merge=None, wlock=None):
428 # TODO unify with commands.py
428 # TODO unify with commands.py
429 if not patchdir:
429 if not patchdir:
430 patchdir = self.path
430 patchdir = self.path
431 err = 0
431 err = 0
432 if not wlock:
432 if not wlock:
433 wlock = repo.wlock()
433 wlock = repo.wlock()
434 lock = repo.lock()
434 lock = repo.lock()
435 tr = repo.transaction()
435 tr = repo.transaction()
436 n = None
436 n = None
437 for patchname in series:
437 for patchname in series:
438 pushable, reason = self.pushable(patchname)
438 pushable, reason = self.pushable(patchname)
439 if not pushable:
439 if not pushable:
440 self.explain_pushable(patchname, all_patches=True)
440 self.explain_pushable(patchname, all_patches=True)
441 continue
441 continue
442 self.ui.warn("applying %s\n" % patchname)
442 self.ui.warn("applying %s\n" % patchname)
443 pf = os.path.join(patchdir, patchname)
443 pf = os.path.join(patchdir, patchname)
444
444
445 try:
445 try:
446 message, comments, user, date, patchfound = self.readheaders(patchname)
446 message, comments, user, date, patchfound = self.readheaders(patchname)
447 except:
447 except:
448 self.ui.warn("Unable to read %s\n" % patchname)
448 self.ui.warn("Unable to read %s\n" % patchname)
449 err = 1
449 err = 1
450 break
450 break
451
451
452 if not message:
452 if not message:
453 message = "imported patch %s\n" % patchname
453 message = "imported patch %s\n" % patchname
454 else:
454 else:
455 if list:
455 if list:
456 message.append("\nimported patch %s" % patchname)
456 message.append("\nimported patch %s" % patchname)
457 message = '\n'.join(message)
457 message = '\n'.join(message)
458
458
459 (patcherr, files, fuzz) = self.patch(repo, pf)
459 (patcherr, files, fuzz) = self.patch(repo, pf)
460 patcherr = not patcherr
460 patcherr = not patcherr
461
461
462 if merge and files:
462 if merge and files:
463 # Mark as merged and update dirstate parent info
463 # Mark as merged and update dirstate parent info
464 repo.dirstate.update(repo.dirstate.filterfiles(files.keys()), 'm')
464 repo.dirstate.update(repo.dirstate.filterfiles(files.keys()), 'm')
465 p1, p2 = repo.dirstate.parents()
465 p1, p2 = repo.dirstate.parents()
466 repo.dirstate.setparents(p1, merge)
466 repo.dirstate.setparents(p1, merge)
467 files = patch.updatedir(self.ui, repo, files, wlock=wlock)
467 files = patch.updatedir(self.ui, repo, files, wlock=wlock)
468 n = repo.commit(files, message, user, date, force=1, lock=lock,
468 n = repo.commit(files, message, user, date, force=1, lock=lock,
469 wlock=wlock)
469 wlock=wlock)
470
470
471 if n == None:
471 if n == None:
472 raise util.Abort(_("repo commit failed"))
472 raise util.Abort(_("repo commit failed"))
473
473
474 if update_status:
474 if update_status:
475 self.applied.append(statusentry(revlog.hex(n), patchname))
475 self.applied.append(statusentry(revlog.hex(n), patchname))
476
476
477 if patcherr:
477 if patcherr:
478 if not patchfound:
478 if not patchfound:
479 self.ui.warn("patch %s is empty\n" % patchname)
479 self.ui.warn("patch %s is empty\n" % patchname)
480 err = 0
480 err = 0
481 else:
481 else:
482 self.ui.warn("patch failed, rejects left in working dir\n")
482 self.ui.warn("patch failed, rejects left in working dir\n")
483 err = 1
483 err = 1
484 break
484 break
485
485
486 if fuzz and strict:
486 if fuzz and strict:
487 self.ui.warn("fuzz found when applying patch, stopping\n")
487 self.ui.warn("fuzz found when applying patch, stopping\n")
488 err = 1
488 err = 1
489 break
489 break
490 tr.close()
490 tr.close()
491 return (err, n)
491 return (err, n)
492
492
493 def delete(self, repo, patches, opts):
493 def delete(self, repo, patches, opts):
494 realpatches = []
494 realpatches = []
495 for patch in patches:
495 for patch in patches:
496 patch = self.lookup(patch, strict=True)
496 patch = self.lookup(patch, strict=True)
497 info = self.isapplied(patch)
497 info = self.isapplied(patch)
498 if info:
498 if info:
499 raise util.Abort(_("cannot delete applied patch %s") % patch)
499 raise util.Abort(_("cannot delete applied patch %s") % patch)
500 if patch not in self.series:
500 if patch not in self.series:
501 raise util.Abort(_("patch %s not in series file") % patch)
501 raise util.Abort(_("patch %s not in series file") % patch)
502 realpatches.append(patch)
502 realpatches.append(patch)
503
503
504 appliedbase = 0
504 appliedbase = 0
505 if opts.get('rev'):
505 if opts.get('rev'):
506 if not self.applied:
506 if not self.applied:
507 raise util.Abort(_('no patches applied'))
507 raise util.Abort(_('no patches applied'))
508 revs = cmdutil.revrange(repo, opts['rev'])
508 revs = cmdutil.revrange(repo, opts['rev'])
509 if len(revs) > 1 and revs[0] > revs[1]:
509 if len(revs) > 1 and revs[0] > revs[1]:
510 revs.reverse()
510 revs.reverse()
511 for rev in revs:
511 for rev in revs:
512 if appliedbase >= len(self.applied):
512 if appliedbase >= len(self.applied):
513 raise util.Abort(_("revision %d is not managed") % rev)
513 raise util.Abort(_("revision %d is not managed") % rev)
514
514
515 base = revlog.bin(self.applied[appliedbase].rev)
515 base = revlog.bin(self.applied[appliedbase].rev)
516 node = repo.changelog.node(rev)
516 node = repo.changelog.node(rev)
517 if node != base:
517 if node != base:
518 raise util.Abort(_("cannot delete revision %d above "
518 raise util.Abort(_("cannot delete revision %d above "
519 "applied patches") % rev)
519 "applied patches") % rev)
520 realpatches.append(self.applied[appliedbase].name)
520 realpatches.append(self.applied[appliedbase].name)
521 appliedbase += 1
521 appliedbase += 1
522
522
523 if not opts.get('keep'):
523 if not opts.get('keep'):
524 r = self.qrepo()
524 r = self.qrepo()
525 if r:
525 if r:
526 r.remove(realpatches, True)
526 r.remove(realpatches, True)
527 else:
527 else:
528 for p in realpatches:
528 for p in realpatches:
529 os.unlink(self.join(p))
529 os.unlink(self.join(p))
530
530
531 if appliedbase:
531 if appliedbase:
532 del self.applied[:appliedbase]
532 del self.applied[:appliedbase]
533 self.applied_dirty = 1
533 self.applied_dirty = 1
534 indices = [self.find_series(p) for p in realpatches]
534 indices = [self.find_series(p) for p in realpatches]
535 indices.sort()
535 indices.sort()
536 for i in indices[-1::-1]:
536 for i in indices[-1::-1]:
537 del self.full_series[i]
537 del self.full_series[i]
538 self.parse_series()
538 self.parse_series()
539 self.series_dirty = 1
539 self.series_dirty = 1
540
540
541 def check_toppatch(self, repo):
541 def check_toppatch(self, repo):
542 if len(self.applied) > 0:
542 if len(self.applied) > 0:
543 top = revlog.bin(self.applied[-1].rev)
543 top = revlog.bin(self.applied[-1].rev)
544 pp = repo.dirstate.parents()
544 pp = repo.dirstate.parents()
545 if top not in pp:
545 if top not in pp:
546 raise util.Abort(_("queue top not at same revision as working directory"))
546 raise util.Abort(_("queue top not at same revision as working directory"))
547 return top
547 return top
548 return None
548 return None
549 def check_localchanges(self, repo, force=False, refresh=True):
549 def check_localchanges(self, repo, force=False, refresh=True):
550 m, a, r, d = repo.status()[:4]
550 m, a, r, d = repo.status()[:4]
551 if m or a or r or d:
551 if m or a or r or d:
552 if not force:
552 if not force:
553 if refresh:
553 if refresh:
554 raise util.Abort(_("local changes found, refresh first"))
554 raise util.Abort(_("local changes found, refresh first"))
555 else:
555 else:
556 raise util.Abort(_("local changes found"))
556 raise util.Abort(_("local changes found"))
557 return m, a, r, d
557 return m, a, r, d
558 def new(self, repo, patch, msg=None, force=None):
558 def new(self, repo, patch, msg=None, force=None):
559 if os.path.exists(self.join(patch)):
559 if os.path.exists(self.join(patch)):
560 raise util.Abort(_('patch "%s" already exists') % patch)
560 raise util.Abort(_('patch "%s" already exists') % patch)
561 m, a, r, d = self.check_localchanges(repo, force)
561 m, a, r, d = self.check_localchanges(repo, force)
562 commitfiles = m + a + r
562 commitfiles = m + a + r
563 self.check_toppatch(repo)
563 self.check_toppatch(repo)
564 wlock = repo.wlock()
564 wlock = repo.wlock()
565 insert = self.full_series_end()
565 insert = self.full_series_end()
566 if msg:
566 if msg:
567 n = repo.commit(commitfiles, "[mq]: %s" % msg, force=True,
567 n = repo.commit(commitfiles, "[mq]: %s" % msg, force=True,
568 wlock=wlock)
568 wlock=wlock)
569 else:
569 else:
570 n = repo.commit(commitfiles,
570 n = repo.commit(commitfiles,
571 "New patch: %s" % patch, force=True, wlock=wlock)
571 "New patch: %s" % patch, force=True, wlock=wlock)
572 if n == None:
572 if n == None:
573 raise util.Abort(_("repo commit failed"))
573 raise util.Abort(_("repo commit failed"))
574 self.full_series[insert:insert] = [patch]
574 self.full_series[insert:insert] = [patch]
575 self.applied.append(statusentry(revlog.hex(n), patch))
575 self.applied.append(statusentry(revlog.hex(n), patch))
576 self.parse_series()
576 self.parse_series()
577 self.series_dirty = 1
577 self.series_dirty = 1
578 self.applied_dirty = 1
578 self.applied_dirty = 1
579 p = self.opener(patch, "w")
579 p = self.opener(patch, "w")
580 if msg:
580 if msg:
581 msg = msg + "\n"
581 msg = msg + "\n"
582 p.write(msg)
582 p.write(msg)
583 p.close()
583 p.close()
584 wlock = None
584 wlock = None
585 r = self.qrepo()
585 r = self.qrepo()
586 if r: r.add([patch])
586 if r: r.add([patch])
587 if commitfiles:
587 if commitfiles:
588 self.refresh(repo, short=True)
588 self.refresh(repo, short=True)
589
589
590 def strip(self, repo, rev, update=True, backup="all", wlock=None):
590 def strip(self, repo, rev, update=True, backup="all", wlock=None):
591 def limitheads(chlog, stop):
591 def limitheads(chlog, stop):
592 """return the list of all nodes that have no children"""
592 """return the list of all nodes that have no children"""
593 p = {}
593 p = {}
594 h = []
594 h = []
595 stoprev = 0
595 stoprev = 0
596 if stop in chlog.nodemap:
596 if stop in chlog.nodemap:
597 stoprev = chlog.rev(stop)
597 stoprev = chlog.rev(stop)
598
598
599 for r in xrange(chlog.count() - 1, -1, -1):
599 for r in xrange(chlog.count() - 1, -1, -1):
600 n = chlog.node(r)
600 n = chlog.node(r)
601 if n not in p:
601 if n not in p:
602 h.append(n)
602 h.append(n)
603 if n == stop:
603 if n == stop:
604 break
604 break
605 if r < stoprev:
605 if r < stoprev:
606 break
606 break
607 for pn in chlog.parents(n):
607 for pn in chlog.parents(n):
608 p[pn] = 1
608 p[pn] = 1
609 return h
609 return h
610
610
611 def bundle(cg):
611 def bundle(cg):
612 backupdir = repo.join("strip-backup")
612 backupdir = repo.join("strip-backup")
613 if not os.path.isdir(backupdir):
613 if not os.path.isdir(backupdir):
614 os.mkdir(backupdir)
614 os.mkdir(backupdir)
615 name = os.path.join(backupdir, "%s" % revlog.short(rev))
615 name = os.path.join(backupdir, "%s" % revlog.short(rev))
616 name = savename(name)
616 name = savename(name)
617 self.ui.warn("saving bundle to %s\n" % name)
617 self.ui.warn("saving bundle to %s\n" % name)
618 return changegroup.writebundle(cg, name, "HG10BZ")
618 return changegroup.writebundle(cg, name, "HG10BZ")
619
619
620 def stripall(revnum):
620 def stripall(revnum):
621 mm = repo.changectx(rev).manifest()
621 mm = repo.changectx(rev).manifest()
622 seen = {}
622 seen = {}
623
623
624 for x in xrange(revnum, repo.changelog.count()):
624 for x in xrange(revnum, repo.changelog.count()):
625 for f in repo.changectx(x).files():
625 for f in repo.changectx(x).files():
626 if f in seen:
626 if f in seen:
627 continue
627 continue
628 seen[f] = 1
628 seen[f] = 1
629 if f in mm:
629 if f in mm:
630 filerev = mm[f]
630 filerev = mm[f]
631 else:
631 else:
632 filerev = 0
632 filerev = 0
633 seen[f] = filerev
633 seen[f] = filerev
634 # we go in two steps here so the strip loop happens in a
634 # we go in two steps here so the strip loop happens in a
635 # sensible order. When stripping many files, this helps keep
635 # sensible order. When stripping many files, this helps keep
636 # our disk access patterns under control.
636 # our disk access patterns under control.
637 seen_list = seen.keys()
637 seen_list = seen.keys()
638 seen_list.sort()
638 seen_list.sort()
639 for f in seen_list:
639 for f in seen_list:
640 ff = repo.file(f)
640 ff = repo.file(f)
641 filerev = seen[f]
641 filerev = seen[f]
642 if filerev != 0:
642 if filerev != 0:
643 if filerev in ff.nodemap:
643 if filerev in ff.nodemap:
644 filerev = ff.rev(filerev)
644 filerev = ff.rev(filerev)
645 else:
645 else:
646 filerev = 0
646 filerev = 0
647 ff.strip(filerev, revnum)
647 ff.strip(filerev, revnum)
648
648
649 if not wlock:
649 if not wlock:
650 wlock = repo.wlock()
650 wlock = repo.wlock()
651 lock = repo.lock()
651 lock = repo.lock()
652 chlog = repo.changelog
652 chlog = repo.changelog
653 # TODO delete the undo files, and handle undo of merge sets
653 # TODO delete the undo files, and handle undo of merge sets
654 pp = chlog.parents(rev)
654 pp = chlog.parents(rev)
655 revnum = chlog.rev(rev)
655 revnum = chlog.rev(rev)
656
656
657 if update:
657 if update:
658 self.check_localchanges(repo, refresh=False)
658 self.check_localchanges(repo, refresh=False)
659 urev = self.qparents(repo, rev)
659 urev = self.qparents(repo, rev)
660 hg.clean(repo, urev, wlock=wlock)
660 hg.clean(repo, urev, wlock=wlock)
661 repo.dirstate.write()
661 repo.dirstate.write()
662
662
663 # save is a list of all the branches we are truncating away
663 # save is a list of all the branches we are truncating away
664 # that we actually want to keep. changegroup will be used
664 # that we actually want to keep. changegroup will be used
665 # to preserve them and add them back after the truncate
665 # to preserve them and add them back after the truncate
666 saveheads = []
666 saveheads = []
667 savebases = {}
667 savebases = {}
668
668
669 heads = limitheads(chlog, rev)
669 heads = limitheads(chlog, rev)
670 seen = {}
670 seen = {}
671
671
672 # search through all the heads, finding those where the revision
672 # search through all the heads, finding those where the revision
673 # we want to strip away is an ancestor. Also look for merges
673 # we want to strip away is an ancestor. Also look for merges
674 # that might be turned into new heads by the strip.
674 # that might be turned into new heads by the strip.
675 while heads:
675 while heads:
676 h = heads.pop()
676 h = heads.pop()
677 n = h
677 n = h
678 while True:
678 while True:
679 seen[n] = 1
679 seen[n] = 1
680 pp = chlog.parents(n)
680 pp = chlog.parents(n)
681 if pp[1] != revlog.nullid:
681 if pp[1] != revlog.nullid:
682 for p in pp:
682 for p in pp:
683 if chlog.rev(p) > revnum and p not in seen:
683 if chlog.rev(p) > revnum and p not in seen:
684 heads.append(p)
684 heads.append(p)
685 if pp[0] == revlog.nullid:
685 if pp[0] == revlog.nullid:
686 break
686 break
687 if chlog.rev(pp[0]) < revnum:
687 if chlog.rev(pp[0]) < revnum:
688 break
688 break
689 n = pp[0]
689 n = pp[0]
690 if n == rev:
690 if n == rev:
691 break
691 break
692 r = chlog.reachable(h, rev)
692 r = chlog.reachable(h, rev)
693 if rev not in r:
693 if rev not in r:
694 saveheads.append(h)
694 saveheads.append(h)
695 for x in r:
695 for x in r:
696 if chlog.rev(x) > revnum:
696 if chlog.rev(x) > revnum:
697 savebases[x] = 1
697 savebases[x] = 1
698
698
699 # create a changegroup for all the branches we need to keep
699 # create a changegroup for all the branches we need to keep
700 if backup == "all":
700 if backup == "all":
701 backupch = repo.changegroupsubset([rev], chlog.heads(), 'strip')
701 backupch = repo.changegroupsubset([rev], chlog.heads(), 'strip')
702 bundle(backupch)
702 bundle(backupch)
703 if saveheads:
703 if saveheads:
704 backupch = repo.changegroupsubset(savebases.keys(), saveheads, 'strip')
704 backupch = repo.changegroupsubset(savebases.keys(), saveheads, 'strip')
705 chgrpfile = bundle(backupch)
705 chgrpfile = bundle(backupch)
706
706
707 stripall(revnum)
707 stripall(revnum)
708
708
709 change = chlog.read(rev)
709 change = chlog.read(rev)
710 chlog.strip(revnum, revnum)
710 chlog.strip(revnum, revnum)
711 repo.manifest.strip(repo.manifest.rev(change[0]), revnum)
711 repo.manifest.strip(repo.manifest.rev(change[0]), revnum)
712 if saveheads:
712 if saveheads:
713 self.ui.status("adding branch\n")
713 self.ui.status("adding branch\n")
714 commands.unbundle(self.ui, repo, "file:%s" % chgrpfile,
714 commands.unbundle(self.ui, repo, "file:%s" % chgrpfile,
715 update=False)
715 update=False)
716 if backup != "strip":
716 if backup != "strip":
717 os.unlink(chgrpfile)
717 os.unlink(chgrpfile)
718
718
719 def isapplied(self, patch):
719 def isapplied(self, patch):
720 """returns (index, rev, patch)"""
720 """returns (index, rev, patch)"""
721 for i in xrange(len(self.applied)):
721 for i in xrange(len(self.applied)):
722 a = self.applied[i]
722 a = self.applied[i]
723 if a.name == patch:
723 if a.name == patch:
724 return (i, a.rev, a.name)
724 return (i, a.rev, a.name)
725 return None
725 return None
726
726
727 # if the exact patch name does not exist, we try a few
727 # if the exact patch name does not exist, we try a few
728 # variations. If strict is passed, we try only #1
728 # variations. If strict is passed, we try only #1
729 #
729 #
730 # 1) a number to indicate an offset in the series file
730 # 1) a number to indicate an offset in the series file
731 # 2) a unique substring of the patch name was given
731 # 2) a unique substring of the patch name was given
732 # 3) patchname[-+]num to indicate an offset in the series file
732 # 3) patchname[-+]num to indicate an offset in the series file
733 def lookup(self, patch, strict=False):
733 def lookup(self, patch, strict=False):
734 patch = patch and str(patch)
734 patch = patch and str(patch)
735
735
736 def partial_name(s):
736 def partial_name(s):
737 if s in self.series:
737 if s in self.series:
738 return s
738 return s
739 matches = [x for x in self.series if s in x]
739 matches = [x for x in self.series if s in x]
740 if len(matches) > 1:
740 if len(matches) > 1:
741 self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
741 self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
742 for m in matches:
742 for m in matches:
743 self.ui.warn(' %s\n' % m)
743 self.ui.warn(' %s\n' % m)
744 return None
744 return None
745 if matches:
745 if matches:
746 return matches[0]
746 return matches[0]
747 if len(self.series) > 0 and len(self.applied) > 0:
747 if len(self.series) > 0 and len(self.applied) > 0:
748 if s == 'qtip':
748 if s == 'qtip':
749 return self.series[self.series_end(True)-1]
749 return self.series[self.series_end(True)-1]
750 if s == 'qbase':
750 if s == 'qbase':
751 return self.series[0]
751 return self.series[0]
752 return None
752 return None
753 if patch == None:
753 if patch == None:
754 return None
754 return None
755
755
756 # we don't want to return a partial match until we make
756 # we don't want to return a partial match until we make
757 # sure the file name passed in does not exist (checked below)
757 # sure the file name passed in does not exist (checked below)
758 res = partial_name(patch)
758 res = partial_name(patch)
759 if res and res == patch:
759 if res and res == patch:
760 return res
760 return res
761
761
762 if not os.path.isfile(self.join(patch)):
762 if not os.path.isfile(self.join(patch)):
763 try:
763 try:
764 sno = int(patch)
764 sno = int(patch)
765 except(ValueError, OverflowError):
765 except(ValueError, OverflowError):
766 pass
766 pass
767 else:
767 else:
768 if sno < len(self.series):
768 if sno < len(self.series):
769 return self.series[sno]
769 return self.series[sno]
770 if not strict:
770 if not strict:
771 # return any partial match made above
771 # return any partial match made above
772 if res:
772 if res:
773 return res
773 return res
774 minus = patch.rfind('-')
774 minus = patch.rfind('-')
775 if minus >= 0:
775 if minus >= 0:
776 res = partial_name(patch[:minus])
776 res = partial_name(patch[:minus])
777 if res:
777 if res:
778 i = self.series.index(res)
778 i = self.series.index(res)
779 try:
779 try:
780 off = int(patch[minus+1:] or 1)
780 off = int(patch[minus+1:] or 1)
781 except(ValueError, OverflowError):
781 except(ValueError, OverflowError):
782 pass
782 pass
783 else:
783 else:
784 if i - off >= 0:
784 if i - off >= 0:
785 return self.series[i - off]
785 return self.series[i - off]
786 plus = patch.rfind('+')
786 plus = patch.rfind('+')
787 if plus >= 0:
787 if plus >= 0:
788 res = partial_name(patch[:plus])
788 res = partial_name(patch[:plus])
789 if res:
789 if res:
790 i = self.series.index(res)
790 i = self.series.index(res)
791 try:
791 try:
792 off = int(patch[plus+1:] or 1)
792 off = int(patch[plus+1:] or 1)
793 except(ValueError, OverflowError):
793 except(ValueError, OverflowError):
794 pass
794 pass
795 else:
795 else:
796 if i + off < len(self.series):
796 if i + off < len(self.series):
797 return self.series[i + off]
797 return self.series[i + off]
798 raise util.Abort(_("patch %s not in series") % patch)
798 raise util.Abort(_("patch %s not in series") % patch)
799
799
800 def push(self, repo, patch=None, force=False, list=False,
800 def push(self, repo, patch=None, force=False, list=False,
801 mergeq=None, wlock=None):
801 mergeq=None, wlock=None):
802 if not wlock:
802 if not wlock:
803 wlock = repo.wlock()
803 wlock = repo.wlock()
804 patch = self.lookup(patch)
804 patch = self.lookup(patch)
805 # Suppose our series file is: A B C and the current 'top' patch is B.
805 # Suppose our series file is: A B C and the current 'top' patch is B.
806 # qpush C should be performed (moving forward)
806 # qpush C should be performed (moving forward)
807 # qpush B is a NOP (no change)
807 # qpush B is a NOP (no change)
808 # qpush A is an error (can't go backwards with qpush)
808 # qpush A is an error (can't go backwards with qpush)
809 if patch:
809 if patch:
810 info = self.isapplied(patch)
810 info = self.isapplied(patch)
811 if info:
811 if info:
812 if info[0] < len(self.applied) - 1:
812 if info[0] < len(self.applied) - 1:
813 raise util.Abort(_("cannot push to a previous patch: %s") %
813 raise util.Abort(_("cannot push to a previous patch: %s") %
814 patch)
814 patch)
815 if info[0] < len(self.series) - 1:
815 if info[0] < len(self.series) - 1:
816 self.ui.warn(_('qpush: %s is already at the top\n') % patch)
816 self.ui.warn(_('qpush: %s is already at the top\n') % patch)
817 else:
817 else:
818 self.ui.warn(_('all patches are currently applied\n'))
818 self.ui.warn(_('all patches are currently applied\n'))
819 return
819 return
820
820
821 # Following the above example, starting at 'top' of B:
821 # Following the above example, starting at 'top' of B:
822 # qpush should be performed (pushes C), but a subsequent qpush without
822 # qpush should be performed (pushes C), but a subsequent qpush without
823 # an argument is an error (nothing to apply). This allows a loop
823 # an argument is an error (nothing to apply). This allows a loop
824 # of "...while hg qpush..." to work as it detects an error when done
824 # of "...while hg qpush..." to work as it detects an error when done
825 if self.series_end() == len(self.series):
825 if self.series_end() == len(self.series):
826 self.ui.warn(_('patch series already fully applied\n'))
826 self.ui.warn(_('patch series already fully applied\n'))
827 return 1
827 return 1
828 if not force:
828 if not force:
829 self.check_localchanges(repo)
829 self.check_localchanges(repo)
830
830
831 self.applied_dirty = 1;
831 self.applied_dirty = 1;
832 start = self.series_end()
832 start = self.series_end()
833 if start > 0:
833 if start > 0:
834 self.check_toppatch(repo)
834 self.check_toppatch(repo)
835 if not patch:
835 if not patch:
836 patch = self.series[start]
836 patch = self.series[start]
837 end = start + 1
837 end = start + 1
838 else:
838 else:
839 end = self.series.index(patch, start) + 1
839 end = self.series.index(patch, start) + 1
840 s = self.series[start:end]
840 s = self.series[start:end]
841 if mergeq:
841 if mergeq:
842 ret = self.mergepatch(repo, mergeq, s, wlock)
842 ret = self.mergepatch(repo, mergeq, s, wlock)
843 else:
843 else:
844 ret = self.apply(repo, s, list, wlock=wlock)
844 ret = self.apply(repo, s, list, wlock=wlock)
845 top = self.applied[-1].name
845 top = self.applied[-1].name
846 if ret[0]:
846 if ret[0]:
847 self.ui.write("Errors during apply, please fix and refresh %s\n" %
847 self.ui.write("Errors during apply, please fix and refresh %s\n" %
848 top)
848 top)
849 else:
849 else:
850 self.ui.write("Now at: %s\n" % top)
850 self.ui.write("Now at: %s\n" % top)
851 return ret[0]
851 return ret[0]
852
852
853 def pop(self, repo, patch=None, force=False, update=True, all=False,
853 def pop(self, repo, patch=None, force=False, update=True, all=False,
854 wlock=None):
854 wlock=None):
855 def getfile(f, rev):
855 def getfile(f, rev):
856 t = repo.file(f).read(rev)
856 t = repo.file(f).read(rev)
857 repo.wfile(f, "w").write(t)
857 repo.wfile(f, "w").write(t)
858
858
859 if not wlock:
859 if not wlock:
860 wlock = repo.wlock()
860 wlock = repo.wlock()
861 if patch:
861 if patch:
862 # index, rev, patch
862 # index, rev, patch
863 info = self.isapplied(patch)
863 info = self.isapplied(patch)
864 if not info:
864 if not info:
865 patch = self.lookup(patch)
865 patch = self.lookup(patch)
866 info = self.isapplied(patch)
866 info = self.isapplied(patch)
867 if not info:
867 if not info:
868 raise util.Abort(_("patch %s is not applied") % patch)
868 raise util.Abort(_("patch %s is not applied") % patch)
869
869
870 if len(self.applied) == 0:
870 if len(self.applied) == 0:
871 # Allow qpop -a to work repeatedly,
871 # Allow qpop -a to work repeatedly,
872 # but not qpop without an argument
872 # but not qpop without an argument
873 self.ui.warn(_("no patches applied\n"))
873 self.ui.warn(_("no patches applied\n"))
874 return not all
874 return not all
875
875
876 if not update:
876 if not update:
877 parents = repo.dirstate.parents()
877 parents = repo.dirstate.parents()
878 rr = [ revlog.bin(x.rev) for x in self.applied ]
878 rr = [ revlog.bin(x.rev) for x in self.applied ]
879 for p in parents:
879 for p in parents:
880 if p in rr:
880 if p in rr:
881 self.ui.warn("qpop: forcing dirstate update\n")
881 self.ui.warn("qpop: forcing dirstate update\n")
882 update = True
882 update = True
883
883
884 if not force and update:
884 if not force and update:
885 self.check_localchanges(repo)
885 self.check_localchanges(repo)
886
886
887 self.applied_dirty = 1;
887 self.applied_dirty = 1;
888 end = len(self.applied)
888 end = len(self.applied)
889 if not patch:
889 if not patch:
890 if all:
890 if all:
891 popi = 0
891 popi = 0
892 else:
892 else:
893 popi = len(self.applied) - 1
893 popi = len(self.applied) - 1
894 else:
894 else:
895 popi = info[0] + 1
895 popi = info[0] + 1
896 if popi >= end:
896 if popi >= end:
897 self.ui.warn("qpop: %s is already at the top\n" % patch)
897 self.ui.warn("qpop: %s is already at the top\n" % patch)
898 return
898 return
899 info = [ popi ] + [self.applied[popi].rev, self.applied[popi].name]
899 info = [ popi ] + [self.applied[popi].rev, self.applied[popi].name]
900
900
901 start = info[0]
901 start = info[0]
902 rev = revlog.bin(info[1])
902 rev = revlog.bin(info[1])
903
903
904 # we know there are no local changes, so we can make a simplified
904 # we know there are no local changes, so we can make a simplified
905 # form of hg.update.
905 # form of hg.update.
906 if update:
906 if update:
907 top = self.check_toppatch(repo)
907 top = self.check_toppatch(repo)
908 qp = self.qparents(repo, rev)
908 qp = self.qparents(repo, rev)
909 changes = repo.changelog.read(qp)
909 changes = repo.changelog.read(qp)
910 mmap = repo.manifest.read(changes[0])
910 mmap = repo.manifest.read(changes[0])
911 m, a, r, d, u = repo.status(qp, top)[:5]
911 m, a, r, d, u = repo.status(qp, top)[:5]
912 if d:
912 if d:
913 raise util.Abort("deletions found between repo revs")
913 raise util.Abort("deletions found between repo revs")
914 for f in m:
914 for f in m:
915 getfile(f, mmap[f])
915 getfile(f, mmap[f])
916 for f in r:
916 for f in r:
917 getfile(f, mmap[f])
917 getfile(f, mmap[f])
918 util.set_exec(repo.wjoin(f), mmap.execf(f))
918 util.set_exec(repo.wjoin(f), mmap.execf(f))
919 repo.dirstate.update(m + r, 'n')
919 repo.dirstate.update(m + r, 'n')
920 for f in a:
920 for f in a:
921 try:
921 try:
922 os.unlink(repo.wjoin(f))
922 os.unlink(repo.wjoin(f))
923 except OSError, e:
923 except OSError, e:
924 if e.errno != errno.ENOENT:
924 if e.errno != errno.ENOENT:
925 raise
925 raise
926 try: os.removedirs(os.path.dirname(repo.wjoin(f)))
926 try: os.removedirs(os.path.dirname(repo.wjoin(f)))
927 except: pass
927 except: pass
928 if a:
928 if a:
929 repo.dirstate.forget(a)
929 repo.dirstate.forget(a)
930 repo.dirstate.setparents(qp, revlog.nullid)
930 repo.dirstate.setparents(qp, revlog.nullid)
931 self.strip(repo, rev, update=False, backup='strip', wlock=wlock)
931 self.strip(repo, rev, update=False, backup='strip', wlock=wlock)
932 del self.applied[start:end]
932 del self.applied[start:end]
933 if len(self.applied):
933 if len(self.applied):
934 self.ui.write("Now at: %s\n" % self.applied[-1].name)
934 self.ui.write("Now at: %s\n" % self.applied[-1].name)
935 else:
935 else:
936 self.ui.write("Patch queue now empty\n")
936 self.ui.write("Patch queue now empty\n")
937
937
938 def diff(self, repo, pats, opts):
938 def diff(self, repo, pats, opts):
939 top = self.check_toppatch(repo)
939 top = self.check_toppatch(repo)
940 if not top:
940 if not top:
941 self.ui.write("No patches applied\n")
941 self.ui.write("No patches applied\n")
942 return
942 return
943 qp = self.qparents(repo, top)
943 qp = self.qparents(repo, top)
944 if opts.get('git'):
944 if opts.get('git'):
945 self.diffopts().git = True
945 self.diffopts().git = True
946 self.printdiff(repo, qp, files=pats, opts=opts)
946 self.printdiff(repo, qp, files=pats, opts=opts)
947
947
948 def refresh(self, repo, pats=None, **opts):
948 def refresh(self, repo, pats=None, **opts):
949 if len(self.applied) == 0:
949 if len(self.applied) == 0:
950 self.ui.write("No patches applied\n")
950 self.ui.write("No patches applied\n")
951 return 1
951 return 1
952 wlock = repo.wlock()
952 wlock = repo.wlock()
953 self.check_toppatch(repo)
953 self.check_toppatch(repo)
954 (top, patchfn) = (self.applied[-1].rev, self.applied[-1].name)
954 (top, patchfn) = (self.applied[-1].rev, self.applied[-1].name)
955 top = revlog.bin(top)
955 top = revlog.bin(top)
956 cparents = repo.changelog.parents(top)
956 cparents = repo.changelog.parents(top)
957 patchparent = self.qparents(repo, top)
957 patchparent = self.qparents(repo, top)
958 message, comments, user, date, patchfound = self.readheaders(patchfn)
958 message, comments, user, date, patchfound = self.readheaders(patchfn)
959
959
960 patchf = self.opener(patchfn, "w")
960 patchf = self.opener(patchfn, "w")
961 msg = opts.get('msg', '').rstrip()
961 msg = opts.get('msg', '').rstrip()
962 if msg:
962 if msg:
963 if comments:
963 if comments:
964 # Remove existing message.
964 # Remove existing message.
965 ci = 0
965 ci = 0
966 for mi in xrange(len(message)):
966 for mi in xrange(len(message)):
967 while message[mi] != comments[ci]:
967 while message[mi] != comments[ci]:
968 ci += 1
968 ci += 1
969 del comments[ci]
969 del comments[ci]
970 comments.append(msg)
970 comments.append(msg)
971 if comments:
971 if comments:
972 comments = "\n".join(comments) + '\n\n'
972 comments = "\n".join(comments) + '\n\n'
973 patchf.write(comments)
973 patchf.write(comments)
974
974
975 if opts.get('git'):
975 if opts.get('git'):
976 self.diffopts().git = True
976 self.diffopts().git = True
977 fns, matchfn, anypats = cmdutil.matchpats(repo, pats, opts)
977 fns, matchfn, anypats = cmdutil.matchpats(repo, pats, opts)
978 tip = repo.changelog.tip()
978 tip = repo.changelog.tip()
979 if top == tip:
979 if top == tip:
980 # if the top of our patch queue is also the tip, there is an
980 # if the top of our patch queue is also the tip, there is an
981 # optimization here. We update the dirstate in place and strip
981 # optimization here. We update the dirstate in place and strip
982 # off the tip commit. Then just commit the current directory
982 # off the tip commit. Then just commit the current directory
983 # tree. We can also send repo.commit the list of files
983 # tree. We can also send repo.commit the list of files
984 # changed to speed up the diff
984 # changed to speed up the diff
985 #
985 #
986 # in short mode, we only diff the files included in the
986 # in short mode, we only diff the files included in the
987 # patch already
987 # patch already
988 #
988 #
989 # this should really read:
989 # this should really read:
990 # mm, dd, aa, aa2, uu = repo.status(tip, patchparent)[:5]
990 # mm, dd, aa, aa2, uu = repo.status(tip, patchparent)[:5]
991 # but we do it backwards to take advantage of manifest/chlog
991 # but we do it backwards to take advantage of manifest/chlog
992 # caching against the next repo.status call
992 # caching against the next repo.status call
993 #
993 #
994 mm, aa, dd, aa2, uu = repo.status(patchparent, tip)[:5]
994 mm, aa, dd, aa2, uu = repo.status(patchparent, tip)[:5]
995 changes = repo.changelog.read(tip)
995 changes = repo.changelog.read(tip)
996 man = repo.manifest.read(changes[0])
996 man = repo.manifest.read(changes[0])
997 aaa = aa[:]
997 aaa = aa[:]
998 if opts.get('short'):
998 if opts.get('short'):
999 filelist = mm + aa + dd
999 filelist = mm + aa + dd
1000 else:
1000 else:
1001 filelist = None
1001 filelist = None
1002 m, a, r, d, u = repo.status(files=filelist)[:5]
1002 m, a, r, d, u = repo.status(files=filelist)[:5]
1003
1003
1004 # we might end up with files that were added between tip and
1004 # we might end up with files that were added between tip and
1005 # the dirstate parent, but then changed in the local dirstate.
1005 # the dirstate parent, but then changed in the local dirstate.
1006 # in this case, we want them to only show up in the added section
1006 # in this case, we want them to only show up in the added section
1007 for x in m:
1007 for x in m:
1008 if x not in aa:
1008 if x not in aa:
1009 mm.append(x)
1009 mm.append(x)
1010 # we might end up with files added by the local dirstate that
1010 # we might end up with files added by the local dirstate that
1011 # were deleted by the patch. In this case, they should only
1011 # were deleted by the patch. In this case, they should only
1012 # show up in the changed section.
1012 # show up in the changed section.
1013 for x in a:
1013 for x in a:
1014 if x in dd:
1014 if x in dd:
1015 del dd[dd.index(x)]
1015 del dd[dd.index(x)]
1016 mm.append(x)
1016 mm.append(x)
1017 else:
1017 else:
1018 aa.append(x)
1018 aa.append(x)
1019 # make sure any files deleted in the local dirstate
1019 # make sure any files deleted in the local dirstate
1020 # are not in the add or change column of the patch
1020 # are not in the add or change column of the patch
1021 forget = []
1021 forget = []
1022 for x in d + r:
1022 for x in d + r:
1023 if x in aa:
1023 if x in aa:
1024 del aa[aa.index(x)]
1024 del aa[aa.index(x)]
1025 forget.append(x)
1025 forget.append(x)
1026 continue
1026 continue
1027 elif x in mm:
1027 elif x in mm:
1028 del mm[mm.index(x)]
1028 del mm[mm.index(x)]
1029 dd.append(x)
1029 dd.append(x)
1030
1030
1031 m = util.unique(mm)
1031 m = util.unique(mm)
1032 r = util.unique(dd)
1032 r = util.unique(dd)
1033 a = util.unique(aa)
1033 a = util.unique(aa)
1034 filelist = filter(matchfn, util.unique(m + r + a))
1034 c = [filter(matchfn, l) for l in (m, a, r, [], u)]
1035 filelist = util.unique(c[0] + c[1] + c[2])
1035 patch.diff(repo, patchparent, files=filelist, match=matchfn,
1036 patch.diff(repo, patchparent, files=filelist, match=matchfn,
1036 fp=patchf, changes=(m, a, r, [], u),
1037 fp=patchf, changes=c, opts=self.diffopts())
1037 opts=self.diffopts())
1038 patchf.close()
1038 patchf.close()
1039
1039
1040 repo.dirstate.setparents(*cparents)
1040 repo.dirstate.setparents(*cparents)
1041 copies = {}
1041 copies = {}
1042 for dst in a:
1042 for dst in a:
1043 src = repo.dirstate.copied(dst)
1043 src = repo.dirstate.copied(dst)
1044 if src is None:
1044 if src is None:
1045 continue
1045 continue
1046 copies.setdefault(src, []).append(dst)
1046 copies.setdefault(src, []).append(dst)
1047 repo.dirstate.update(a, 'a')
1047 repo.dirstate.update(a, 'a')
1048 # remember the copies between patchparent and tip
1048 # remember the copies between patchparent and tip
1049 # this may be slow, so don't do it if we're not tracking copies
1049 # this may be slow, so don't do it if we're not tracking copies
1050 if self.diffopts().git:
1050 if self.diffopts().git:
1051 for dst in aaa:
1051 for dst in aaa:
1052 f = repo.file(dst)
1052 f = repo.file(dst)
1053 src = f.renamed(man[dst])
1053 src = f.renamed(man[dst])
1054 if src:
1054 if src:
1055 copies[src[0]] = copies.get(dst, [])
1055 copies[src[0]] = copies.get(dst, [])
1056 if dst in a:
1056 if dst in a:
1057 copies[src[0]].append(dst)
1057 copies[src[0]].append(dst)
1058 # we can't copy a file created by the patch itself
1058 # we can't copy a file created by the patch itself
1059 if dst in copies:
1059 if dst in copies:
1060 del copies[dst]
1060 del copies[dst]
1061 for src, dsts in copies.iteritems():
1061 for src, dsts in copies.iteritems():
1062 for dst in dsts:
1062 for dst in dsts:
1063 repo.dirstate.copy(src, dst)
1063 repo.dirstate.copy(src, dst)
1064 repo.dirstate.update(r, 'r')
1064 repo.dirstate.update(r, 'r')
1065 # if the patch excludes a modified file, mark that file with mtime=0
1065 # if the patch excludes a modified file, mark that file with mtime=0
1066 # so status can see it.
1066 # so status can see it.
1067 mm = []
1067 mm = []
1068 for i in xrange(len(m)-1, -1, -1):
1068 for i in xrange(len(m)-1, -1, -1):
1069 if not matchfn(m[i]):
1069 if not matchfn(m[i]):
1070 mm.append(m[i])
1070 mm.append(m[i])
1071 del m[i]
1071 del m[i]
1072 repo.dirstate.update(m, 'n')
1072 repo.dirstate.update(m, 'n')
1073 repo.dirstate.update(mm, 'n', st_mtime=0)
1073 repo.dirstate.update(mm, 'n', st_mtime=0)
1074 repo.dirstate.forget(forget)
1074 repo.dirstate.forget(forget)
1075
1075
1076 if not msg:
1076 if not msg:
1077 if not message:
1077 if not message:
1078 message = "patch queue: %s\n" % patchfn
1078 message = "patch queue: %s\n" % patchfn
1079 else:
1079 else:
1080 message = "\n".join(message)
1080 message = "\n".join(message)
1081 else:
1081 else:
1082 message = msg
1082 message = msg
1083
1083
1084 self.strip(repo, top, update=False, backup='strip', wlock=wlock)
1084 self.strip(repo, top, update=False, backup='strip', wlock=wlock)
1085 n = repo.commit(filelist, message, changes[1], force=1, wlock=wlock)
1085 n = repo.commit(filelist, message, changes[1], match=matchfn,
1086 force=1, wlock=wlock)
1086 self.applied[-1] = statusentry(revlog.hex(n), patchfn)
1087 self.applied[-1] = statusentry(revlog.hex(n), patchfn)
1087 self.applied_dirty = 1
1088 self.applied_dirty = 1
1088 else:
1089 else:
1089 self.printdiff(repo, patchparent, fp=patchf)
1090 self.printdiff(repo, patchparent, fp=patchf)
1090 patchf.close()
1091 patchf.close()
1091 added = repo.status()[1]
1092 added = repo.status()[1]
1092 for a in added:
1093 for a in added:
1093 f = repo.wjoin(a)
1094 f = repo.wjoin(a)
1094 try:
1095 try:
1095 os.unlink(f)
1096 os.unlink(f)
1096 except OSError, e:
1097 except OSError, e:
1097 if e.errno != errno.ENOENT:
1098 if e.errno != errno.ENOENT:
1098 raise
1099 raise
1099 try: os.removedirs(os.path.dirname(f))
1100 try: os.removedirs(os.path.dirname(f))
1100 except: pass
1101 except: pass
1101 # forget the file copies in the dirstate
1102 # forget the file copies in the dirstate
1102 # push should readd the files later on
1103 # push should readd the files later on
1103 repo.dirstate.forget(added)
1104 repo.dirstate.forget(added)
1104 self.pop(repo, force=True, wlock=wlock)
1105 self.pop(repo, force=True, wlock=wlock)
1105 self.push(repo, force=True, wlock=wlock)
1106 self.push(repo, force=True, wlock=wlock)
1106
1107
1107 def init(self, repo, create=False):
1108 def init(self, repo, create=False):
1108 if not create and os.path.isdir(self.path):
1109 if not create and os.path.isdir(self.path):
1109 raise util.Abort(_("patch queue directory already exists"))
1110 raise util.Abort(_("patch queue directory already exists"))
1110 try:
1111 try:
1111 os.mkdir(self.path)
1112 os.mkdir(self.path)
1112 except OSError, inst:
1113 except OSError, inst:
1113 if inst.errno != errno.EEXIST or not create:
1114 if inst.errno != errno.EEXIST or not create:
1114 raise
1115 raise
1115 if create:
1116 if create:
1116 return self.qrepo(create=True)
1117 return self.qrepo(create=True)
1117
1118
1118 def unapplied(self, repo, patch=None):
1119 def unapplied(self, repo, patch=None):
1119 if patch and patch not in self.series:
1120 if patch and patch not in self.series:
1120 raise util.Abort(_("patch %s is not in series file") % patch)
1121 raise util.Abort(_("patch %s is not in series file") % patch)
1121 if not patch:
1122 if not patch:
1122 start = self.series_end()
1123 start = self.series_end()
1123 else:
1124 else:
1124 start = self.series.index(patch) + 1
1125 start = self.series.index(patch) + 1
1125 unapplied = []
1126 unapplied = []
1126 for i in xrange(start, len(self.series)):
1127 for i in xrange(start, len(self.series)):
1127 pushable, reason = self.pushable(i)
1128 pushable, reason = self.pushable(i)
1128 if pushable:
1129 if pushable:
1129 unapplied.append((i, self.series[i]))
1130 unapplied.append((i, self.series[i]))
1130 self.explain_pushable(i)
1131 self.explain_pushable(i)
1131 return unapplied
1132 return unapplied
1132
1133
1133 def qseries(self, repo, missing=None, start=0, length=0, status=None,
1134 def qseries(self, repo, missing=None, start=0, length=0, status=None,
1134 summary=False):
1135 summary=False):
1135 def displayname(patchname):
1136 def displayname(patchname):
1136 if summary:
1137 if summary:
1137 msg = self.readheaders(patchname)[0]
1138 msg = self.readheaders(patchname)[0]
1138 msg = msg and ': ' + msg[0] or ': '
1139 msg = msg and ': ' + msg[0] or ': '
1139 else:
1140 else:
1140 msg = ''
1141 msg = ''
1141 return '%s%s' % (patchname, msg)
1142 return '%s%s' % (patchname, msg)
1142
1143
1143 def pname(i):
1144 def pname(i):
1144 if status == 'A':
1145 if status == 'A':
1145 return self.applied[i].name
1146 return self.applied[i].name
1146 else:
1147 else:
1147 return self.series[i]
1148 return self.series[i]
1148
1149
1149 applied = dict.fromkeys([p.name for p in self.applied])
1150 applied = dict.fromkeys([p.name for p in self.applied])
1150 if not length:
1151 if not length:
1151 length = len(self.series) - start
1152 length = len(self.series) - start
1152 if not missing:
1153 if not missing:
1153 for i in xrange(start, start+length):
1154 for i in xrange(start, start+length):
1154 pfx = ''
1155 pfx = ''
1155 patch = pname(i)
1156 patch = pname(i)
1156 if self.ui.verbose:
1157 if self.ui.verbose:
1157 if patch in applied:
1158 if patch in applied:
1158 stat = 'A'
1159 stat = 'A'
1159 elif self.pushable(i)[0]:
1160 elif self.pushable(i)[0]:
1160 stat = 'U'
1161 stat = 'U'
1161 else:
1162 else:
1162 stat = 'G'
1163 stat = 'G'
1163 pfx = '%d %s ' % (i, stat)
1164 pfx = '%d %s ' % (i, stat)
1164 self.ui.write('%s%s\n' % (pfx, displayname(patch)))
1165 self.ui.write('%s%s\n' % (pfx, displayname(patch)))
1165 else:
1166 else:
1166 msng_list = []
1167 msng_list = []
1167 for root, dirs, files in os.walk(self.path):
1168 for root, dirs, files in os.walk(self.path):
1168 d = root[len(self.path) + 1:]
1169 d = root[len(self.path) + 1:]
1169 for f in files:
1170 for f in files:
1170 fl = os.path.join(d, f)
1171 fl = os.path.join(d, f)
1171 if (fl not in self.series and
1172 if (fl not in self.series and
1172 fl not in (self.status_path, self.series_path)
1173 fl not in (self.status_path, self.series_path)
1173 and not fl.startswith('.')):
1174 and not fl.startswith('.')):
1174 msng_list.append(fl)
1175 msng_list.append(fl)
1175 msng_list.sort()
1176 msng_list.sort()
1176 for x in msng_list:
1177 for x in msng_list:
1177 pfx = self.ui.verbose and ('D ') or ''
1178 pfx = self.ui.verbose and ('D ') or ''
1178 self.ui.write("%s%s\n" % (pfx, displayname(x)))
1179 self.ui.write("%s%s\n" % (pfx, displayname(x)))
1179
1180
1180 def issaveline(self, l):
1181 def issaveline(self, l):
1181 if l.name == '.hg.patches.save.line':
1182 if l.name == '.hg.patches.save.line':
1182 return True
1183 return True
1183
1184
1184 def qrepo(self, create=False):
1185 def qrepo(self, create=False):
1185 if create or os.path.isdir(self.join(".hg")):
1186 if create or os.path.isdir(self.join(".hg")):
1186 return hg.repository(self.ui, path=self.path, create=create)
1187 return hg.repository(self.ui, path=self.path, create=create)
1187
1188
1188 def restore(self, repo, rev, delete=None, qupdate=None):
1189 def restore(self, repo, rev, delete=None, qupdate=None):
1189 c = repo.changelog.read(rev)
1190 c = repo.changelog.read(rev)
1190 desc = c[4].strip()
1191 desc = c[4].strip()
1191 lines = desc.splitlines()
1192 lines = desc.splitlines()
1192 i = 0
1193 i = 0
1193 datastart = None
1194 datastart = None
1194 series = []
1195 series = []
1195 applied = []
1196 applied = []
1196 qpp = None
1197 qpp = None
1197 for i in xrange(0, len(lines)):
1198 for i in xrange(0, len(lines)):
1198 if lines[i] == 'Patch Data:':
1199 if lines[i] == 'Patch Data:':
1199 datastart = i + 1
1200 datastart = i + 1
1200 elif lines[i].startswith('Dirstate:'):
1201 elif lines[i].startswith('Dirstate:'):
1201 l = lines[i].rstrip()
1202 l = lines[i].rstrip()
1202 l = l[10:].split(' ')
1203 l = l[10:].split(' ')
1203 qpp = [ hg.bin(x) for x in l ]
1204 qpp = [ hg.bin(x) for x in l ]
1204 elif datastart != None:
1205 elif datastart != None:
1205 l = lines[i].rstrip()
1206 l = lines[i].rstrip()
1206 se = statusentry(l)
1207 se = statusentry(l)
1207 file_ = se.name
1208 file_ = se.name
1208 if se.rev:
1209 if se.rev:
1209 applied.append(se)
1210 applied.append(se)
1210 else:
1211 else:
1211 series.append(file_)
1212 series.append(file_)
1212 if datastart == None:
1213 if datastart == None:
1213 self.ui.warn("No saved patch data found\n")
1214 self.ui.warn("No saved patch data found\n")
1214 return 1
1215 return 1
1215 self.ui.warn("restoring status: %s\n" % lines[0])
1216 self.ui.warn("restoring status: %s\n" % lines[0])
1216 self.full_series = series
1217 self.full_series = series
1217 self.applied = applied
1218 self.applied = applied
1218 self.parse_series()
1219 self.parse_series()
1219 self.series_dirty = 1
1220 self.series_dirty = 1
1220 self.applied_dirty = 1
1221 self.applied_dirty = 1
1221 heads = repo.changelog.heads()
1222 heads = repo.changelog.heads()
1222 if delete:
1223 if delete:
1223 if rev not in heads:
1224 if rev not in heads:
1224 self.ui.warn("save entry has children, leaving it alone\n")
1225 self.ui.warn("save entry has children, leaving it alone\n")
1225 else:
1226 else:
1226 self.ui.warn("removing save entry %s\n" % hg.short(rev))
1227 self.ui.warn("removing save entry %s\n" % hg.short(rev))
1227 pp = repo.dirstate.parents()
1228 pp = repo.dirstate.parents()
1228 if rev in pp:
1229 if rev in pp:
1229 update = True
1230 update = True
1230 else:
1231 else:
1231 update = False
1232 update = False
1232 self.strip(repo, rev, update=update, backup='strip')
1233 self.strip(repo, rev, update=update, backup='strip')
1233 if qpp:
1234 if qpp:
1234 self.ui.warn("saved queue repository parents: %s %s\n" %
1235 self.ui.warn("saved queue repository parents: %s %s\n" %
1235 (hg.short(qpp[0]), hg.short(qpp[1])))
1236 (hg.short(qpp[0]), hg.short(qpp[1])))
1236 if qupdate:
1237 if qupdate:
1237 print "queue directory updating"
1238 print "queue directory updating"
1238 r = self.qrepo()
1239 r = self.qrepo()
1239 if not r:
1240 if not r:
1240 self.ui.warn("Unable to load queue repository\n")
1241 self.ui.warn("Unable to load queue repository\n")
1241 return 1
1242 return 1
1242 hg.clean(r, qpp[0])
1243 hg.clean(r, qpp[0])
1243
1244
1244 def save(self, repo, msg=None):
1245 def save(self, repo, msg=None):
1245 if len(self.applied) == 0:
1246 if len(self.applied) == 0:
1246 self.ui.warn("save: no patches applied, exiting\n")
1247 self.ui.warn("save: no patches applied, exiting\n")
1247 return 1
1248 return 1
1248 if self.issaveline(self.applied[-1]):
1249 if self.issaveline(self.applied[-1]):
1249 self.ui.warn("status is already saved\n")
1250 self.ui.warn("status is already saved\n")
1250 return 1
1251 return 1
1251
1252
1252 ar = [ ':' + x for x in self.full_series ]
1253 ar = [ ':' + x for x in self.full_series ]
1253 if not msg:
1254 if not msg:
1254 msg = "hg patches saved state"
1255 msg = "hg patches saved state"
1255 else:
1256 else:
1256 msg = "hg patches: " + msg.rstrip('\r\n')
1257 msg = "hg patches: " + msg.rstrip('\r\n')
1257 r = self.qrepo()
1258 r = self.qrepo()
1258 if r:
1259 if r:
1259 pp = r.dirstate.parents()
1260 pp = r.dirstate.parents()
1260 msg += "\nDirstate: %s %s" % (hg.hex(pp[0]), hg.hex(pp[1]))
1261 msg += "\nDirstate: %s %s" % (hg.hex(pp[0]), hg.hex(pp[1]))
1261 msg += "\n\nPatch Data:\n"
1262 msg += "\n\nPatch Data:\n"
1262 text = msg + "\n".join([str(x) for x in self.applied]) + '\n' + (ar and
1263 text = msg + "\n".join([str(x) for x in self.applied]) + '\n' + (ar and
1263 "\n".join(ar) + '\n' or "")
1264 "\n".join(ar) + '\n' or "")
1264 n = repo.commit(None, text, user=None, force=1)
1265 n = repo.commit(None, text, user=None, force=1)
1265 if not n:
1266 if not n:
1266 self.ui.warn("repo commit failed\n")
1267 self.ui.warn("repo commit failed\n")
1267 return 1
1268 return 1
1268 self.applied.append(statusentry(revlog.hex(n),'.hg.patches.save.line'))
1269 self.applied.append(statusentry(revlog.hex(n),'.hg.patches.save.line'))
1269 self.applied_dirty = 1
1270 self.applied_dirty = 1
1270
1271
1271 def full_series_end(self):
1272 def full_series_end(self):
1272 if len(self.applied) > 0:
1273 if len(self.applied) > 0:
1273 p = self.applied[-1].name
1274 p = self.applied[-1].name
1274 end = self.find_series(p)
1275 end = self.find_series(p)
1275 if end == None:
1276 if end == None:
1276 return len(self.full_series)
1277 return len(self.full_series)
1277 return end + 1
1278 return end + 1
1278 return 0
1279 return 0
1279
1280
1280 def series_end(self, all_patches=False):
1281 def series_end(self, all_patches=False):
1281 end = 0
1282 end = 0
1282 def next(start):
1283 def next(start):
1283 if all_patches:
1284 if all_patches:
1284 return start
1285 return start
1285 i = start
1286 i = start
1286 while i < len(self.series):
1287 while i < len(self.series):
1287 p, reason = self.pushable(i)
1288 p, reason = self.pushable(i)
1288 if p:
1289 if p:
1289 break
1290 break
1290 self.explain_pushable(i)
1291 self.explain_pushable(i)
1291 i += 1
1292 i += 1
1292 return i
1293 return i
1293 if len(self.applied) > 0:
1294 if len(self.applied) > 0:
1294 p = self.applied[-1].name
1295 p = self.applied[-1].name
1295 try:
1296 try:
1296 end = self.series.index(p)
1297 end = self.series.index(p)
1297 except ValueError:
1298 except ValueError:
1298 return 0
1299 return 0
1299 return next(end + 1)
1300 return next(end + 1)
1300 return next(end)
1301 return next(end)
1301
1302
1302 def appliedname(self, index):
1303 def appliedname(self, index):
1303 pname = self.applied[index].name
1304 pname = self.applied[index].name
1304 if not self.ui.verbose:
1305 if not self.ui.verbose:
1305 p = pname
1306 p = pname
1306 else:
1307 else:
1307 p = str(self.series.index(pname)) + " " + pname
1308 p = str(self.series.index(pname)) + " " + pname
1308 return p
1309 return p
1309
1310
1310 def qimport(self, repo, files, patchname=None, rev=None, existing=None,
1311 def qimport(self, repo, files, patchname=None, rev=None, existing=None,
1311 force=None, git=False):
1312 force=None, git=False):
1312 def checkseries(patchname):
1313 def checkseries(patchname):
1313 if patchname in self.series:
1314 if patchname in self.series:
1314 raise util.Abort(_('patch %s is already in the series file')
1315 raise util.Abort(_('patch %s is already in the series file')
1315 % patchname)
1316 % patchname)
1316 def checkfile(patchname):
1317 def checkfile(patchname):
1317 if not force and os.path.exists(self.join(patchname)):
1318 if not force and os.path.exists(self.join(patchname)):
1318 raise util.Abort(_('patch "%s" already exists')
1319 raise util.Abort(_('patch "%s" already exists')
1319 % patchname)
1320 % patchname)
1320
1321
1321 if rev:
1322 if rev:
1322 if files:
1323 if files:
1323 raise util.Abort(_('option "-r" not valid when importing '
1324 raise util.Abort(_('option "-r" not valid when importing '
1324 'files'))
1325 'files'))
1325 rev = cmdutil.revrange(repo, rev)
1326 rev = cmdutil.revrange(repo, rev)
1326 rev.sort(lambda x, y: cmp(y, x))
1327 rev.sort(lambda x, y: cmp(y, x))
1327 if (len(files) > 1 or len(rev) > 1) and patchname:
1328 if (len(files) > 1 or len(rev) > 1) and patchname:
1328 raise util.Abort(_('option "-n" not valid when importing multiple '
1329 raise util.Abort(_('option "-n" not valid when importing multiple '
1329 'patches'))
1330 'patches'))
1330 i = 0
1331 i = 0
1331 added = []
1332 added = []
1332 if rev:
1333 if rev:
1333 # If mq patches are applied, we can only import revisions
1334 # If mq patches are applied, we can only import revisions
1334 # that form a linear path to qbase.
1335 # that form a linear path to qbase.
1335 # Otherwise, they should form a linear path to a head.
1336 # Otherwise, they should form a linear path to a head.
1336 heads = repo.changelog.heads(repo.changelog.node(rev[-1]))
1337 heads = repo.changelog.heads(repo.changelog.node(rev[-1]))
1337 if len(heads) > 1:
1338 if len(heads) > 1:
1338 raise util.Abort(_('revision %d is the root of more than one '
1339 raise util.Abort(_('revision %d is the root of more than one '
1339 'branch') % rev[-1])
1340 'branch') % rev[-1])
1340 if self.applied:
1341 if self.applied:
1341 base = revlog.hex(repo.changelog.node(rev[0]))
1342 base = revlog.hex(repo.changelog.node(rev[0]))
1342 if base in [n.rev for n in self.applied]:
1343 if base in [n.rev for n in self.applied]:
1343 raise util.Abort(_('revision %d is already managed')
1344 raise util.Abort(_('revision %d is already managed')
1344 % rev[0])
1345 % rev[0])
1345 if heads != [revlog.bin(self.applied[-1].rev)]:
1346 if heads != [revlog.bin(self.applied[-1].rev)]:
1346 raise util.Abort(_('revision %d is not the parent of '
1347 raise util.Abort(_('revision %d is not the parent of '
1347 'the queue') % rev[0])
1348 'the queue') % rev[0])
1348 base = repo.changelog.rev(revlog.bin(self.applied[0].rev))
1349 base = repo.changelog.rev(revlog.bin(self.applied[0].rev))
1349 lastparent = repo.changelog.parentrevs(base)[0]
1350 lastparent = repo.changelog.parentrevs(base)[0]
1350 else:
1351 else:
1351 if heads != [repo.changelog.node(rev[0])]:
1352 if heads != [repo.changelog.node(rev[0])]:
1352 raise util.Abort(_('revision %d has unmanaged children')
1353 raise util.Abort(_('revision %d has unmanaged children')
1353 % rev[0])
1354 % rev[0])
1354 lastparent = None
1355 lastparent = None
1355
1356
1356 if git:
1357 if git:
1357 self.diffopts().git = True
1358 self.diffopts().git = True
1358
1359
1359 for r in rev:
1360 for r in rev:
1360 p1, p2 = repo.changelog.parentrevs(r)
1361 p1, p2 = repo.changelog.parentrevs(r)
1361 n = repo.changelog.node(r)
1362 n = repo.changelog.node(r)
1362 if p2 != revlog.nullrev:
1363 if p2 != revlog.nullrev:
1363 raise util.Abort(_('cannot import merge revision %d') % r)
1364 raise util.Abort(_('cannot import merge revision %d') % r)
1364 if lastparent and lastparent != r:
1365 if lastparent and lastparent != r:
1365 raise util.Abort(_('revision %d is not the parent of %d')
1366 raise util.Abort(_('revision %d is not the parent of %d')
1366 % (r, lastparent))
1367 % (r, lastparent))
1367 lastparent = p1
1368 lastparent = p1
1368
1369
1369 if not patchname:
1370 if not patchname:
1370 patchname = normname('%d.diff' % r)
1371 patchname = normname('%d.diff' % r)
1371 checkseries(patchname)
1372 checkseries(patchname)
1372 checkfile(patchname)
1373 checkfile(patchname)
1373 self.full_series.insert(0, patchname)
1374 self.full_series.insert(0, patchname)
1374
1375
1375 patchf = self.opener(patchname, "w")
1376 patchf = self.opener(patchname, "w")
1376 patch.export(repo, [n], fp=patchf, opts=self.diffopts())
1377 patch.export(repo, [n], fp=patchf, opts=self.diffopts())
1377 patchf.close()
1378 patchf.close()
1378
1379
1379 se = statusentry(revlog.hex(n), patchname)
1380 se = statusentry(revlog.hex(n), patchname)
1380 self.applied.insert(0, se)
1381 self.applied.insert(0, se)
1381
1382
1382 added.append(patchname)
1383 added.append(patchname)
1383 patchname = None
1384 patchname = None
1384 self.parse_series()
1385 self.parse_series()
1385 self.applied_dirty = 1
1386 self.applied_dirty = 1
1386
1387
1387 for filename in files:
1388 for filename in files:
1388 if existing:
1389 if existing:
1389 if filename == '-':
1390 if filename == '-':
1390 raise util.Abort(_('-e is incompatible with import from -'))
1391 raise util.Abort(_('-e is incompatible with import from -'))
1391 if not patchname:
1392 if not patchname:
1392 patchname = normname(filename)
1393 patchname = normname(filename)
1393 if not os.path.isfile(self.join(patchname)):
1394 if not os.path.isfile(self.join(patchname)):
1394 raise util.Abort(_("patch %s does not exist") % patchname)
1395 raise util.Abort(_("patch %s does not exist") % patchname)
1395 else:
1396 else:
1396 try:
1397 try:
1397 if filename == '-':
1398 if filename == '-':
1398 if not patchname:
1399 if not patchname:
1399 raise util.Abort(_('need --name to import a patch from -'))
1400 raise util.Abort(_('need --name to import a patch from -'))
1400 text = sys.stdin.read()
1401 text = sys.stdin.read()
1401 else:
1402 else:
1402 text = file(filename).read()
1403 text = file(filename).read()
1403 except IOError:
1404 except IOError:
1404 raise util.Abort(_("unable to read %s") % patchname)
1405 raise util.Abort(_("unable to read %s") % patchname)
1405 if not patchname:
1406 if not patchname:
1406 patchname = normname(os.path.basename(filename))
1407 patchname = normname(os.path.basename(filename))
1407 checkfile(patchname)
1408 checkfile(patchname)
1408 patchf = self.opener(patchname, "w")
1409 patchf = self.opener(patchname, "w")
1409 patchf.write(text)
1410 patchf.write(text)
1410 checkseries(patchname)
1411 checkseries(patchname)
1411 index = self.full_series_end() + i
1412 index = self.full_series_end() + i
1412 self.full_series[index:index] = [patchname]
1413 self.full_series[index:index] = [patchname]
1413 self.parse_series()
1414 self.parse_series()
1414 self.ui.warn("adding %s to series file\n" % patchname)
1415 self.ui.warn("adding %s to series file\n" % patchname)
1415 i += 1
1416 i += 1
1416 added.append(patchname)
1417 added.append(patchname)
1417 patchname = None
1418 patchname = None
1418 self.series_dirty = 1
1419 self.series_dirty = 1
1419 qrepo = self.qrepo()
1420 qrepo = self.qrepo()
1420 if qrepo:
1421 if qrepo:
1421 qrepo.add(added)
1422 qrepo.add(added)
1422
1423
1423 def delete(ui, repo, *patches, **opts):
1424 def delete(ui, repo, *patches, **opts):
1424 """remove patches from queue
1425 """remove patches from queue
1425
1426
1426 With --rev, mq will stop managing the named revisions. The
1427 With --rev, mq will stop managing the named revisions. The
1427 patches must be applied and at the base of the stack. This option
1428 patches must be applied and at the base of the stack. This option
1428 is useful when the patches have been applied upstream.
1429 is useful when the patches have been applied upstream.
1429
1430
1430 Otherwise, the patches must not be applied.
1431 Otherwise, the patches must not be applied.
1431
1432
1432 With --keep, the patch files are preserved in the patch directory."""
1433 With --keep, the patch files are preserved in the patch directory."""
1433 q = repo.mq
1434 q = repo.mq
1434 q.delete(repo, patches, opts)
1435 q.delete(repo, patches, opts)
1435 q.save_dirty()
1436 q.save_dirty()
1436 return 0
1437 return 0
1437
1438
1438 def applied(ui, repo, patch=None, **opts):
1439 def applied(ui, repo, patch=None, **opts):
1439 """print the patches already applied"""
1440 """print the patches already applied"""
1440 q = repo.mq
1441 q = repo.mq
1441 if patch:
1442 if patch:
1442 if patch not in q.series:
1443 if patch not in q.series:
1443 raise util.Abort(_("patch %s is not in series file") % patch)
1444 raise util.Abort(_("patch %s is not in series file") % patch)
1444 end = q.series.index(patch) + 1
1445 end = q.series.index(patch) + 1
1445 else:
1446 else:
1446 end = len(q.applied)
1447 end = len(q.applied)
1447 if not end:
1448 if not end:
1448 return
1449 return
1449
1450
1450 return q.qseries(repo, length=end, status='A', summary=opts.get('summary'))
1451 return q.qseries(repo, length=end, status='A', summary=opts.get('summary'))
1451
1452
1452 def unapplied(ui, repo, patch=None, **opts):
1453 def unapplied(ui, repo, patch=None, **opts):
1453 """print the patches not yet applied"""
1454 """print the patches not yet applied"""
1454 q = repo.mq
1455 q = repo.mq
1455 if patch:
1456 if patch:
1456 if patch not in q.series:
1457 if patch not in q.series:
1457 raise util.Abort(_("patch %s is not in series file") % patch)
1458 raise util.Abort(_("patch %s is not in series file") % patch)
1458 start = q.series.index(patch) + 1
1459 start = q.series.index(patch) + 1
1459 else:
1460 else:
1460 start = q.series_end()
1461 start = q.series_end()
1461 q.qseries(repo, start=start, summary=opts.get('summary'))
1462 q.qseries(repo, start=start, summary=opts.get('summary'))
1462
1463
1463 def qimport(ui, repo, *filename, **opts):
1464 def qimport(ui, repo, *filename, **opts):
1464 """import a patch
1465 """import a patch
1465
1466
1466 The patch will have the same name as its source file unless you
1467 The patch will have the same name as its source file unless you
1467 give it a new one with --name.
1468 give it a new one with --name.
1468
1469
1469 You can register an existing patch inside the patch directory
1470 You can register an existing patch inside the patch directory
1470 with the --existing flag.
1471 with the --existing flag.
1471
1472
1472 With --force, an existing patch of the same name will be overwritten.
1473 With --force, an existing patch of the same name will be overwritten.
1473
1474
1474 An existing changeset may be placed under mq control with --rev
1475 An existing changeset may be placed under mq control with --rev
1475 (e.g. qimport --rev tip -n patch will place tip under mq control).
1476 (e.g. qimport --rev tip -n patch will place tip under mq control).
1476 With --git, patches imported with --rev will use the git diff
1477 With --git, patches imported with --rev will use the git diff
1477 format.
1478 format.
1478 """
1479 """
1479 q = repo.mq
1480 q = repo.mq
1480 q.qimport(repo, filename, patchname=opts['name'],
1481 q.qimport(repo, filename, patchname=opts['name'],
1481 existing=opts['existing'], force=opts['force'], rev=opts['rev'],
1482 existing=opts['existing'], force=opts['force'], rev=opts['rev'],
1482 git=opts['git'])
1483 git=opts['git'])
1483 q.save_dirty()
1484 q.save_dirty()
1484 return 0
1485 return 0
1485
1486
1486 def init(ui, repo, **opts):
1487 def init(ui, repo, **opts):
1487 """init a new queue repository
1488 """init a new queue repository
1488
1489
1489 The queue repository is unversioned by default. If -c is
1490 The queue repository is unversioned by default. If -c is
1490 specified, qinit will create a separate nested repository
1491 specified, qinit will create a separate nested repository
1491 for patches. Use qcommit to commit changes to this queue
1492 for patches. Use qcommit to commit changes to this queue
1492 repository."""
1493 repository."""
1493 q = repo.mq
1494 q = repo.mq
1494 r = q.init(repo, create=opts['create_repo'])
1495 r = q.init(repo, create=opts['create_repo'])
1495 q.save_dirty()
1496 q.save_dirty()
1496 if r:
1497 if r:
1497 if not os.path.exists(r.wjoin('.hgignore')):
1498 if not os.path.exists(r.wjoin('.hgignore')):
1498 fp = r.wopener('.hgignore', 'w')
1499 fp = r.wopener('.hgignore', 'w')
1499 fp.write('syntax: glob\n')
1500 fp.write('syntax: glob\n')
1500 fp.write('status\n')
1501 fp.write('status\n')
1501 fp.write('guards\n')
1502 fp.write('guards\n')
1502 fp.close()
1503 fp.close()
1503 if not os.path.exists(r.wjoin('series')):
1504 if not os.path.exists(r.wjoin('series')):
1504 r.wopener('series', 'w').close()
1505 r.wopener('series', 'w').close()
1505 r.add(['.hgignore', 'series'])
1506 r.add(['.hgignore', 'series'])
1506 commands.add(ui, r)
1507 commands.add(ui, r)
1507 return 0
1508 return 0
1508
1509
1509 def clone(ui, source, dest=None, **opts):
1510 def clone(ui, source, dest=None, **opts):
1510 '''clone main and patch repository at same time
1511 '''clone main and patch repository at same time
1511
1512
1512 If source is local, destination will have no patches applied. If
1513 If source is local, destination will have no patches applied. If
1513 source is remote, this command can not check if patches are
1514 source is remote, this command can not check if patches are
1514 applied in source, so cannot guarantee that patches are not
1515 applied in source, so cannot guarantee that patches are not
1515 applied in destination. If you clone remote repository, be sure
1516 applied in destination. If you clone remote repository, be sure
1516 before that it has no patches applied.
1517 before that it has no patches applied.
1517
1518
1518 Source patch repository is looked for in <src>/.hg/patches by
1519 Source patch repository is looked for in <src>/.hg/patches by
1519 default. Use -p <url> to change.
1520 default. Use -p <url> to change.
1520 '''
1521 '''
1521 commands.setremoteconfig(ui, opts)
1522 commands.setremoteconfig(ui, opts)
1522 if dest is None:
1523 if dest is None:
1523 dest = hg.defaultdest(source)
1524 dest = hg.defaultdest(source)
1524 sr = hg.repository(ui, ui.expandpath(source))
1525 sr = hg.repository(ui, ui.expandpath(source))
1525 qbase, destrev = None, None
1526 qbase, destrev = None, None
1526 if sr.local():
1527 if sr.local():
1527 if sr.mq.applied:
1528 if sr.mq.applied:
1528 qbase = revlog.bin(sr.mq.applied[0].rev)
1529 qbase = revlog.bin(sr.mq.applied[0].rev)
1529 if not hg.islocal(dest):
1530 if not hg.islocal(dest):
1530 destrev = sr.parents(qbase)[0]
1531 heads = dict.fromkeys(sr.heads())
1532 for h in sr.heads(qbase):
1533 del heads[h]
1534 destrev = heads.keys()
1535 destrev.append(sr.changelog.parents(qbase)[0])
1531 ui.note(_('cloning main repo\n'))
1536 ui.note(_('cloning main repo\n'))
1532 sr, dr = hg.clone(ui, sr, dest,
1537 sr, dr = hg.clone(ui, sr, dest,
1533 pull=opts['pull'],
1538 pull=opts['pull'],
1534 rev=destrev,
1539 rev=destrev,
1535 update=False,
1540 update=False,
1536 stream=opts['uncompressed'])
1541 stream=opts['uncompressed'])
1537 ui.note(_('cloning patch repo\n'))
1542 ui.note(_('cloning patch repo\n'))
1538 spr, dpr = hg.clone(ui, opts['patches'] or (sr.url() + '/.hg/patches'),
1543 spr, dpr = hg.clone(ui, opts['patches'] or (sr.url() + '/.hg/patches'),
1539 dr.url() + '/.hg/patches',
1544 dr.url() + '/.hg/patches',
1540 pull=opts['pull'],
1545 pull=opts['pull'],
1541 update=not opts['noupdate'],
1546 update=not opts['noupdate'],
1542 stream=opts['uncompressed'])
1547 stream=opts['uncompressed'])
1543 if dr.local():
1548 if dr.local():
1544 if qbase:
1549 if qbase:
1545 ui.note(_('stripping applied patches from destination repo\n'))
1550 ui.note(_('stripping applied patches from destination repo\n'))
1546 dr.mq.strip(dr, qbase, update=False, backup=None)
1551 dr.mq.strip(dr, qbase, update=False, backup=None)
1547 if not opts['noupdate']:
1552 if not opts['noupdate']:
1548 ui.note(_('updating destination repo\n'))
1553 ui.note(_('updating destination repo\n'))
1549 hg.update(dr, dr.changelog.tip())
1554 hg.update(dr, dr.changelog.tip())
1550
1555
1551 def commit(ui, repo, *pats, **opts):
1556 def commit(ui, repo, *pats, **opts):
1552 """commit changes in the queue repository"""
1557 """commit changes in the queue repository"""
1553 q = repo.mq
1558 q = repo.mq
1554 r = q.qrepo()
1559 r = q.qrepo()
1555 if not r: raise util.Abort('no queue repository')
1560 if not r: raise util.Abort('no queue repository')
1556 commands.commit(r.ui, r, *pats, **opts)
1561 commands.commit(r.ui, r, *pats, **opts)
1557
1562
1558 def series(ui, repo, **opts):
1563 def series(ui, repo, **opts):
1559 """print the entire series file"""
1564 """print the entire series file"""
1560 repo.mq.qseries(repo, missing=opts['missing'], summary=opts['summary'])
1565 repo.mq.qseries(repo, missing=opts['missing'], summary=opts['summary'])
1561 return 0
1566 return 0
1562
1567
1563 def top(ui, repo, **opts):
1568 def top(ui, repo, **opts):
1564 """print the name of the current patch"""
1569 """print the name of the current patch"""
1565 q = repo.mq
1570 q = repo.mq
1566 t = len(q.applied)
1571 t = len(q.applied)
1567 if t:
1572 if t:
1568 return q.qseries(repo, start=t-1, length=1, status='A',
1573 return q.qseries(repo, start=t-1, length=1, status='A',
1569 summary=opts.get('summary'))
1574 summary=opts.get('summary'))
1570 else:
1575 else:
1571 ui.write("No patches applied\n")
1576 ui.write("No patches applied\n")
1572 return 1
1577 return 1
1573
1578
1574 def next(ui, repo, **opts):
1579 def next(ui, repo, **opts):
1575 """print the name of the next patch"""
1580 """print the name of the next patch"""
1576 q = repo.mq
1581 q = repo.mq
1577 end = q.series_end()
1582 end = q.series_end()
1578 if end == len(q.series):
1583 if end == len(q.series):
1579 ui.write("All patches applied\n")
1584 ui.write("All patches applied\n")
1580 return 1
1585 return 1
1581 return q.qseries(repo, start=end, length=1, summary=opts.get('summary'))
1586 return q.qseries(repo, start=end, length=1, summary=opts.get('summary'))
1582
1587
def prev(ui, repo, **opts):
    """print the name of the previous patch"""
    q = repo.mq
    count = len(q.applied)
    if count == 1:
        ui.write("Only one patch applied\n")
        return 1
    if not count:
        ui.write("No patches applied\n")
        return 1
    # the previous patch is the one below the current top of the stack
    return q.qseries(repo, start=count - 2, length=1, status='A',
                     summary=opts.get('summary'))
1595
1600
def new(ui, repo, patch, **opts):
    """create a new patch

    qnew creates a new patch on top of the currently-applied patch
    (if any). It will refuse to run if there are any outstanding
    changes unless -f is specified, in which case the patch will
    be initialised with them.

    -e, -m or -l set the patch header as well as the commit message.
    If none is specified, the patch header is empty and the
    commit message is 'New patch: PATCH'"""
    q = repo.mq
    msg = commands.logmessage(opts)
    if opts['edit']:
        # let the user compose/amend the message interactively
        msg = ui.edit(msg, ui.username())
    q.new(repo, patch, msg=msg, force=opts['force'])
    q.save_dirty()
    return 0
1614
1619
def refresh(ui, repo, *pats, **opts):
    """update the current patch

    If any file patterns are provided, the refreshed patch will contain only
    the modifications that match those patterns; the remaining modifications
    will remain in the working directory.

    hg add/remove/copy/rename work as usual, though you might want to use
    git-style patches (--git or [diff] git=1) to track copies and renames.
    """
    q = repo.mq
    msg = commands.logmessage(opts)
    if opts['edit']:
        if msg:
            raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
        # seed the editor with the existing header of the topmost patch
        toppatch = q.applied[-1].name
        msg, comment, user, date, hasdiff = q.readheaders(toppatch)
        msg = ui.edit('\n'.join(msg), user or ui.username())
    ret = q.refresh(repo, pats, msg=msg, **opts)
    q.save_dirty()
    return ret
1636
1641
def diff(ui, repo, *pats, **opts):
    """diff of the current patch"""
    q = repo.mq
    q.diff(repo, pats, opts)
    return 0
1641
1646
def fold(ui, repo, *files, **opts):
    """fold the named patches into the current patch

    Patches must not yet be applied. Each patch will be successively
    applied to the current patch in the order given. If all the
    patches apply successfully, the current patch will be refreshed
    with the new cumulative patch, and the folded patches will
    be deleted. With -k/--keep, the folded patch files will not
    be removed afterwards.

    The header for each folded patch will be concatenated with
    the current patch header, separated by a line of '* * *'."""

    q = repo.mq

    if not files:
        raise util.Abort(_('qfold requires at least one patch name'))
    if not q.check_toppatch(repo):
        raise util.Abort(_('No patches applied'))

    message = commands.logmessage(opts)
    if opts['edit']:
        if message:
            raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))

    parent = q.lookup('qtip')
    patches = []
    messages = []
    for f in files:
        p = q.lookup(f)
        if p in patches or p == parent:
            ui.warn(_('Skipping already folded patch %s') % p)
            # bug fix: the warning said "Skipping" but the old code fell
            # through and appended the duplicate anyway, folding it twice
            continue
        if q.isapplied(p):
            raise util.Abort(_('qfold cannot fold already applied patch %s') % p)
        patches.append(p)

    for p in patches:
        if not message:
            # no -m/-l message given: collect each patch's header so the
            # combined message can be assembled below
            messages.append(q.readheaders(p)[0])
        pf = q.join(p)
        (patchsuccess, files, fuzz) = q.patch(repo, pf)
        if not patchsuccess:
            raise util.Abort(_('Error folding patch %s') % p)
        patch.updatedir(ui, repo, files)

    if not message:
        # start from the current patch's header and append each folded
        # header, separated by the '* * *' marker line
        message, comments, user = q.readheaders(parent)[0:3]
        for msg in messages:
            message.append('* * *')
            message.extend(msg)
        message = '\n'.join(message)

    if opts['edit']:
        message = ui.edit(message, user or ui.username())

    q.refresh(repo, msg=message)
    q.delete(repo, patches, opts)
    q.save_dirty()
1700
1705
def guard(ui, repo, *args, **opts):
    '''set or print guards for a patch

    Guards control whether a patch can be pushed. A patch with no
    guards is always pushed. A patch with a positive guard ("+foo") is
    pushed only if the qselect command has activated it. A patch with
    a negative guard ("-foo") is never pushed if the qselect command
    has activated it.

    With no arguments, print the currently active guards.
    With arguments, set guards for the named patch.

    To set a negative guard "-foo" on topmost patch ("--" is needed so
    hg will not interpret "-foo" as an option):
        hg qguard -- -foo

    To set guards on another patch:
        hg qguard other.patch +2.6.17 -stable
    '''
    def status(idx):
        # print "patchname: guard guard ..." for series entry idx
        guards = q.series_guards[idx] or ['unguarded']
        ui.write('%s: %s\n' % (q.series[idx], ' '.join(guards)))
    q = repo.mq
    patch = None
    args = list(args)
    if opts['list']:
        # -l/--list prints guards for every patch and takes no other input
        if args or opts['none']:
            raise util.Abort(_('cannot mix -l/--list with options or arguments'))
        for i in xrange(len(q.series)):
            status(i)
        return
    # no patch named, or the first arg is itself a guard ("+x"/"-x"):
    # operate on the topmost applied patch
    if not args or args[0][0:1] in '-+':
        if not q.applied:
            raise util.Abort(_('no patches applied'))
        patch = q.applied[-1].name
    # otherwise the first argument is the patch name; the rest are guards
    if patch is None and args[0][0:1] not in '-+':
        patch = args.pop(0)
    if patch is None:
        raise util.Abort(_('no patch to work with'))
    if args or opts['none']:
        # set (or with -n, clear) the guards on the selected patch
        idx = q.find_series(patch)
        if idx is None:
            raise util.Abort(_('no patch named %s') % patch)
        q.set_guards(idx, args)
        q.save_dirty()
    else:
        # no guards given: just report the current guards of the patch
        status(q.series.index(q.lookup(patch)))
1748
1753
def header(ui, repo, patch=None):
    """Print the header of the topmost or specified patch"""
    q = repo.mq

    if not patch:
        # default to the topmost applied patch
        if not q.applied:
            ui.write('No patches applied\n')
            return 1
        patch = q.lookup('qtip')
    else:
        patch = q.lookup(patch)
    lines = q.readheaders(patch)[0]

    ui.write('\n'.join(lines) + '\n')
1763
1768
def lastsavename(path):
    """Return (filename, index) of the highest-numbered save of *path*.

    Save files live next to *path* and are named "<basename>.<N>".
    Returns the full path of the file with the largest N together with
    N itself, or (None, None) when no save file exists.
    """
    (directory, base) = os.path.split(path)
    names = os.listdir(directory)
    # escape the base name and the separator dot: the old pattern
    # "%s.([0-9]+)" let "." match any character, so e.g. "patchesX1"
    # was wrongly counted as a save of "patches"
    namere = re.compile("%s\\.([0-9]+)" % re.escape(base))
    maxindex = None
    maxname = None
    for f in names:
        m = namere.match(f)
        if m:
            index = int(m.group(1))
            if maxindex is None or index > maxindex:
                maxindex = index
                maxname = f
    if maxname:
        return (os.path.join(directory, maxname), maxindex)
    return (None, None)
1780
1785
def savename(path):
    """Return the next unused save name for *path* ("path.N", N one
    past the highest existing save index, starting at 1)."""
    last, index = lastsavename(path)
    if last is None:
        index = 0
    return "%s.%d" % (path, index + 1)
1787
1792
def push(ui, repo, patch=None, **opts):
    """push the next patch onto the stack"""
    q = repo.mq
    mergeq = None

    if opts['all']:
        # --all: push up to the last patch in the series
        if not q.series:
            ui.warn(_('no patches in series\n'))
            return 0
        patch = q.series[-1]

    if opts['merge']:
        # merge against a previously saved queue (-n names it explicitly,
        # otherwise pick the most recent save)
        newpath = opts['name']
        if not newpath:
            newpath, i = lastsavename(q.path)
        if not newpath:
            ui.warn("no saved queues found, please use -n\n")
            return 1
        mergeq = queue(ui, repo.join(""), newpath)
        ui.warn("merging with queue at: %s\n" % mergeq.path)

    ret = q.push(repo, patch, force=opts['force'], list=opts['list'],
                 mergeq=mergeq)
    q.save_dirty()
    return ret
1812
1817
def pop(ui, repo, patch=None, **opts):
    """pop the current patch off the stack"""
    if opts['name']:
        # operate on a named (saved) queue; don't touch the working dir
        q = queue(ui, repo.join(""), repo.join(opts['name']))
        ui.warn('using patch queue: %s\n' % q.path)
        localupdate = False
    else:
        q = repo.mq
        localupdate = True
    ret = q.pop(repo, patch, force=opts['force'], update=localupdate,
                all=opts['all'])
    q.save_dirty()
    return ret
1826
1831
def rename(ui, repo, patch, name=None, **opts):
    """rename a patch

    With one argument, renames the current patch to PATCH1.
    With two arguments, renames PATCH1 to PATCH2."""

    q = repo.mq

    # single-argument form: the argument is the new name and the source
    # defaults to the topmost applied patch
    if not name:
        name = patch
        patch = None

    if patch:
        patch = q.lookup(patch)
    else:
        if not q.applied:
            ui.write(_('No patches applied\n'))
            return
        patch = q.lookup('qtip')
    absdest = q.join(name)
    if os.path.isdir(absdest):
        # renaming into a directory keeps the original basename
        name = normname(os.path.join(name, os.path.basename(patch)))
        absdest = q.join(name)
    if os.path.exists(absdest):
        raise util.Abort(_('%s already exists') % absdest)

    if name in q.series:
        raise util.Abort(_('A patch named %s already exists in the series file') % name)

    if ui.verbose:
        ui.write('Renaming %s to %s\n' % (patch, name))
    # rewrite the series entry, preserving any "#guard" annotations
    i = q.find_series(patch)
    guards = q.guard_re.findall(q.full_series[i])
    q.full_series[i] = name + ''.join([' #' + g for g in guards])
    q.parse_series()
    q.series_dirty = 1

    # if the patch is applied, update its entry in the status file too
    info = q.isapplied(patch)
    if info:
        q.applied[info[0]] = statusentry(info[1], name)
    q.applied_dirty = 1

    util.rename(q.join(patch), absdest)
    r = q.qrepo()
    if r:
        # mirror the rename in the versioned patch repository
        wlock = r.wlock()
        if r.dirstate.state(name) == 'r':
            r.undelete([name], wlock)
        r.copy(patch, name, wlock)
        r.remove([patch], False, wlock)

    q.save_dirty()
1879
1884
def restore(ui, repo, rev, **opts):
    """restore the queue state saved by a rev"""
    q = repo.mq
    q.restore(repo, repo.lookup(rev), delete=opts['delete'],
              qupdate=opts['update'])
    q.save_dirty()
    return 0
1888
1893
def save(ui, repo, **opts):
    """save current queue state"""
    q = repo.mq
    message = commands.logmessage(opts)
    ret = q.save(repo, msg=message)
    if ret:
        return ret
    q.save_dirty()
    if opts['copy']:
        # -c/--copy: snapshot the queue directory, either to -n NAME or
        # to the next auto-generated "<path>.N" save name
        path = q.path
        if opts['name']:
            newpath = os.path.join(q.basepath, opts['name'])
            if os.path.exists(newpath):
                if not os.path.isdir(newpath):
                    raise util.Abort(_('destination %s exists and is not '
                                       'a directory') % newpath)
                if not opts['force']:
                    raise util.Abort(_('destination %s exists, '
                                       'use -f to force') % newpath)
        else:
            newpath = savename(path)
        ui.warn("copy %s to %s\n" % (path, newpath))
        util.copyfiles(path, newpath)
    if opts['empty']:
        try:
            os.unlink(q.join(q.status_path))
        except OSError:
            # best-effort removal of the status file: ignore a missing
            # file, but do not swallow unrelated exceptions the way the
            # old bare "except:" did
            pass
    return 0
1918
1923
def strip(ui, repo, rev, **opts):
    """strip a revision and all later revs on the same branch"""
    rev = repo.lookup(rev)
    # choose what to back up: everything by default, only the stripped
    # changes with --backup, nothing with --nobackup
    if opts['backup']:
        backup = 'strip'
    elif opts['nobackup']:
        backup = 'none'
    else:
        backup = 'all'
    update = repo.dirstate.parents()[0] != revlog.nullid
    repo.mq.strip(repo, rev, backup=backup, update=update)
    return 0
1930
1935
def select(ui, repo, *args, **opts):
    '''set or print guarded patches to push

    Use the qguard command to set or print guards on patch, then use
    qselect to tell mq which guards to use. A patch will be pushed if it
    has no guards or any positive guards match the currently selected guard,
    but will not be pushed if any negative guards match the current guard.
    For example:

        qguard foo.patch -stable    (negative guard)
        qguard bar.patch +stable    (positive guard)
        qselect stable

    This activates the "stable" guard. mq will skip foo.patch (because
    it has a negative match) but push bar.patch (because it
    has a positive match).

    With no arguments, prints the currently active guards.
    With one argument, sets the active guard.

    Use -n/--none to deactivate guards (no other arguments needed).
    When no guards are active, patches with positive guards are skipped
    and patches with negative guards are pushed.

    qselect can change the guards on applied patches. It does not pop
    guarded patches by default. Use --pop to pop back to the last applied
    patch that is not guarded. Use --reapply (which implies --pop) to push
    back to the current patch afterwards, but skip guarded patches.

    Use -s/--series to print a list of all guards in the series file (no
    other arguments needed). Use -v for more information.'''

    q = repo.mq
    guards = q.active()
    if args or opts['none']:
        # change the active guard set; remember the pre-change counts so
        # we can tell the user how pushability was affected
        old_unapplied = q.unapplied(repo)
        old_guarded = [i for i in xrange(len(q.applied)) if
                       not q.pushable(i)[0]]
        q.set_active(args)
        q.save_dirty()
        if not args:
            ui.status(_('guards deactivated\n'))
        if not opts['pop'] and not opts['reapply']:
            unapplied = q.unapplied(repo)
            guarded = [i for i in xrange(len(q.applied))
                       if not q.pushable(i)[0]]
            if len(unapplied) != len(old_unapplied):
                ui.status(_('number of unguarded, unapplied patches has '
                            'changed from %d to %d\n') %
                          (len(old_unapplied), len(unapplied)))
            if len(guarded) != len(old_guarded):
                ui.status(_('number of guarded, applied patches has changed '
                            'from %d to %d\n') %
                          (len(old_guarded), len(guarded)))
    elif opts['series']:
        # -s/--series: tally how many patches carry each guard
        guards = {}
        noguards = 0
        for gs in q.series_guards:
            if not gs:
                noguards += 1
            for g in gs:
                guards.setdefault(g, 0)
                guards[g] += 1
        if ui.verbose:
            guards['NONE'] = noguards
        guards = guards.items()
        # sort by guard name, ignoring the leading '+'/'-' sign
        guards.sort(lambda a, b: cmp(a[0][1:], b[0][1:]))
        if guards:
            ui.note(_('guards in series file:\n'))
            for guard, count in guards:
                ui.note('%2d ' % count)
                ui.write(guard, '\n')
        else:
            ui.note(_('no guards in series file\n'))
    else:
        # no arguments: just report the active guards
        if guards:
            ui.note(_('active guards:\n'))
            for g in guards:
                ui.write(g, '\n')
        else:
            ui.write(_('no active guards\n'))
    # --reapply remembers the current top so it can be re-pushed after
    # the guarded patches are popped
    reapply = opts['reapply'] and q.applied and q.appliedname(-1)
    popped = False
    if opts['pop'] or opts['reapply']:
        # pop down to just below the first applied patch that is no
        # longer pushable under the new guard selection
        for i in xrange(len(q.applied)):
            pushable, reason = q.pushable(i)
            if not pushable:
                ui.status(_('popping guarded patches\n'))
                popped = True
                if i == 0:
                    q.pop(repo, all=True)
                else:
                    q.pop(repo, i-1)
                break
    if popped:
        try:
            if reapply:
                ui.status(_('reapplying unguarded patches\n'))
                q.push(repo, reapply)
        finally:
            q.save_dirty()
2032
2037
def reposetup(ui, repo):
    # Wrap the repository class so core commands become mq-aware:
    # commit/push refuse to act over applied patches, and applied
    # patches show up as tags (qbase/qtip/patch names).
    class mqrepo(repo.__class__):
        def abort_if_wdir_patched(self, errmsg, force=False):
            # raise errmsg if the working directory parent is an applied
            # mq patch (unless force is set)
            if self.mq.applied and not force:
                parent = revlog.hex(self.dirstate.parents()[0])
                if parent in [s.rev for s in self.mq.applied]:
                    raise util.Abort(errmsg)

        def commit(self, *args, **opts):
            # 'force' may arrive positionally (6th arg) or as a keyword,
            # depending on the caller
            if len(args) >= 6:
                force = args[5]
            else:
                force = opts.get('force')
            self.abort_if_wdir_patched(
                _('cannot commit over an applied mq patch'),
                force)

            return super(mqrepo, self).commit(*args, **opts)

        def push(self, remote, force=False, revs=None):
            # pushing applied mq patches to another repo is almost always
            # a mistake; require -f or an explicit rev list
            if self.mq.applied and not force and not revs:
                raise util.Abort(_('source has mq patches applied'))
            return super(mqrepo, self).push(remote, force, revs)

        def tags(self):
            if self.tagscache:
                return self.tagscache

            tagscache = super(mqrepo, self).tags()

            q = self.mq
            if not q.applied:
                return tagscache

            # expose each applied patch as a tag, plus qbase/qtip
            # aliases for the bottom/top of the applied stack
            mqtags = [(patch.rev, patch.name) for patch in q.applied]
            mqtags.append((mqtags[-1][0], 'qtip'))
            mqtags.append((mqtags[0][0], 'qbase'))
            for patch in mqtags:
                if patch[1] in tagscache:
                    # real tags win over mq patch names
                    self.ui.warn('Tag %s overrides mq patch of the same name\n' % patch[1])
                else:
                    tagscache[patch[1]] = revlog.bin(patch[0])

            return tagscache

        def _branchtags(self):
            q = self.mq
            if not q.applied:
                return super(mqrepo, self)._branchtags()

            self.branchcache = {} # avoid recursion in changectx
            cl = self.changelog
            partial, last, lrev = self._readbranchcache()

            # only revisions below qbase (the first applied patch) may be
            # written back to the on-disk branch cache; patches are
            # transient and must not be persisted there
            qbase = cl.rev(revlog.bin(q.applied[0].rev))
            start = lrev + 1
            if start < qbase:
                # update the cache (excluding the patches) and save it
                self._updatebranchcache(partial, lrev+1, qbase)
                self._writebranchcache(partial, cl.node(qbase-1), qbase-1)
                start = qbase
            # if start = qbase, the cache is as updated as it should be.
            # if start > qbase, the cache includes (part of) the patches.
            # we might as well use it, but we won't save it.

            # update the cache up to the tip
            self._updatebranchcache(partial, start, cl.count())

            return partial

    if repo.local():
        repo.__class__ = mqrepo
        repo.mq = queue(ui, repo.join(""))
2106
2111
2107 seriesopts = [('s', 'summary', None, _('print first line of patch header'))]
2112 seriesopts = [('s', 'summary', None, _('print first line of patch header'))]
2108
2113
2109 cmdtable = {
2114 cmdtable = {
2110 "qapplied": (applied, [] + seriesopts, 'hg qapplied [-s] [PATCH]'),
2115 "qapplied": (applied, [] + seriesopts, 'hg qapplied [-s] [PATCH]'),
2111 "qclone": (clone,
2116 "qclone": (clone,
2112 [('', 'pull', None, _('use pull protocol to copy metadata')),
2117 [('', 'pull', None, _('use pull protocol to copy metadata')),
2113 ('U', 'noupdate', None, _('do not update the new working directories')),
2118 ('U', 'noupdate', None, _('do not update the new working directories')),
2114 ('', 'uncompressed', None,
2119 ('', 'uncompressed', None,
2115 _('use uncompressed transfer (fast over LAN)')),
2120 _('use uncompressed transfer (fast over LAN)')),
2116 ('e', 'ssh', '', _('specify ssh command to use')),
2121 ('e', 'ssh', '', _('specify ssh command to use')),
2117 ('p', 'patches', '', _('location of source patch repo')),
2122 ('p', 'patches', '', _('location of source patch repo')),
2118 ('', 'remotecmd', '',
2123 ('', 'remotecmd', '',
2119 _('specify hg command to run on the remote side'))],
2124 _('specify hg command to run on the remote side'))],
2120 'hg qclone [OPTION]... SOURCE [DEST]'),
2125 'hg qclone [OPTION]... SOURCE [DEST]'),
2121 "qcommit|qci":
2126 "qcommit|qci":
2122 (commit,
2127 (commit,
2123 commands.table["^commit|ci"][1],
2128 commands.table["^commit|ci"][1],
2124 'hg qcommit [OPTION]... [FILE]...'),
2129 'hg qcommit [OPTION]... [FILE]...'),
2125 "^qdiff": (diff,
2130 "^qdiff": (diff,
2126 [('g', 'git', None, _('use git extended diff format')),
2131 [('g', 'git', None, _('use git extended diff format')),
2127 ('I', 'include', [], _('include names matching the given patterns')),
2132 ('I', 'include', [], _('include names matching the given patterns')),
2128 ('X', 'exclude', [], _('exclude names matching the given patterns'))],
2133 ('X', 'exclude', [], _('exclude names matching the given patterns'))],
2129 'hg qdiff [-I] [-X] [FILE]...'),
2134 'hg qdiff [-I] [-X] [FILE]...'),
2130 "qdelete|qremove|qrm":
2135 "qdelete|qremove|qrm":
2131 (delete,
2136 (delete,
2132 [('k', 'keep', None, _('keep patch file')),
2137 [('k', 'keep', None, _('keep patch file')),
2133 ('r', 'rev', [], _('stop managing a revision'))],
2138 ('r', 'rev', [], _('stop managing a revision'))],
2134 'hg qdelete [-k] [-r REV]... PATCH...'),
2139 'hg qdelete [-k] [-r REV]... PATCH...'),
2135 'qfold':
2140 'qfold':
2136 (fold,
2141 (fold,
2137 [('e', 'edit', None, _('edit patch header')),
2142 [('e', 'edit', None, _('edit patch header')),
2138 ('k', 'keep', None, _('keep folded patch files'))
2143 ('k', 'keep', None, _('keep folded patch files'))
2139 ] + commands.commitopts,
2144 ] + commands.commitopts,
2140 'hg qfold [-e] [-m <text>] [-l <file] PATCH...'),
2145 'hg qfold [-e] [-m <text>] [-l <file] PATCH...'),
2141 'qguard': (guard, [('l', 'list', None, _('list all patches and guards')),
2146 'qguard': (guard, [('l', 'list', None, _('list all patches and guards')),
2142 ('n', 'none', None, _('drop all guards'))],
2147 ('n', 'none', None, _('drop all guards'))],
2143 'hg qguard [PATCH] [+GUARD...] [-GUARD...]'),
2148 'hg qguard [PATCH] [+GUARD...] [-GUARD...]'),
2144 'qheader': (header, [],
2149 'qheader': (header, [],
2145 _('hg qheader [PATCH]')),
2150 _('hg qheader [PATCH]')),
2146 "^qimport":
2151 "^qimport":
2147 (qimport,
2152 (qimport,
2148 [('e', 'existing', None, 'import file in patch dir'),
2153 [('e', 'existing', None, 'import file in patch dir'),
2149 ('n', 'name', '', 'patch file name'),
2154 ('n', 'name', '', 'patch file name'),
2150 ('f', 'force', None, 'overwrite existing files'),
2155 ('f', 'force', None, 'overwrite existing files'),
2151 ('r', 'rev', [], 'place existing revisions under mq control'),
2156 ('r', 'rev', [], 'place existing revisions under mq control'),
2152 ('g', 'git', None, _('use git extended diff format'))],
2157 ('g', 'git', None, _('use git extended diff format'))],
2153 'hg qimport [-e] [-n NAME] [-f] [-g] [-r REV]... FILE...'),
2158 'hg qimport [-e] [-n NAME] [-f] [-g] [-r REV]... FILE...'),
2154 "^qinit":
2159 "^qinit":
2155 (init,
2160 (init,
2156 [('c', 'create-repo', None, 'create queue repository')],
2161 [('c', 'create-repo', None, 'create queue repository')],
2157 'hg qinit [-c]'),
2162 'hg qinit [-c]'),
2158 "qnew":
2163 "qnew":
2159 (new,
2164 (new,
2160 [('e', 'edit', None, _('edit commit message')),
2165 [('e', 'edit', None, _('edit commit message')),
2161 ('f', 'force', None, _('import uncommitted changes into patch'))
2166 ('f', 'force', None, _('import uncommitted changes into patch'))
2162 ] + commands.commitopts,
2167 ] + commands.commitopts,
2163 'hg qnew [-e] [-m TEXT] [-l FILE] [-f] PATCH'),
2168 'hg qnew [-e] [-m TEXT] [-l FILE] [-f] PATCH'),
2164 "qnext": (next, [] + seriesopts, 'hg qnext [-s]'),
2169 "qnext": (next, [] + seriesopts, 'hg qnext [-s]'),
2165 "qprev": (prev, [] + seriesopts, 'hg qprev [-s]'),
2170 "qprev": (prev, [] + seriesopts, 'hg qprev [-s]'),
2166 "^qpop":
2171 "^qpop":
2167 (pop,
2172 (pop,
2168 [('a', 'all', None, 'pop all patches'),
2173 [('a', 'all', None, 'pop all patches'),
2169 ('n', 'name', '', 'queue name to pop'),
2174 ('n', 'name', '', 'queue name to pop'),
2170 ('f', 'force', None, 'forget any local changes')],
2175 ('f', 'force', None, 'forget any local changes')],
2171 'hg qpop [-a] [-n NAME] [-f] [PATCH | INDEX]'),
2176 'hg qpop [-a] [-n NAME] [-f] [PATCH | INDEX]'),
2172 "^qpush":
2177 "^qpush":
2173 (push,
2178 (push,
2174 [('f', 'force', None, 'apply if the patch has rejects'),
2179 [('f', 'force', None, 'apply if the patch has rejects'),
2175 ('l', 'list', None, 'list patch name in commit text'),
2180 ('l', 'list', None, 'list patch name in commit text'),
2176 ('a', 'all', None, 'apply all patches'),
2181 ('a', 'all', None, 'apply all patches'),
2177 ('m', 'merge', None, 'merge from another queue'),
2182 ('m', 'merge', None, 'merge from another queue'),
2178 ('n', 'name', '', 'merge queue name')],
2183 ('n', 'name', '', 'merge queue name')],
2179 'hg qpush [-f] [-l] [-a] [-m] [-n NAME] [PATCH | INDEX]'),
2184 'hg qpush [-f] [-l] [-a] [-m] [-n NAME] [PATCH | INDEX]'),
2180 "^qrefresh":
2185 "^qrefresh":
2181 (refresh,
2186 (refresh,
2182 [('e', 'edit', None, _('edit commit message')),
2187 [('e', 'edit', None, _('edit commit message')),
2183 ('g', 'git', None, _('use git extended diff format')),
2188 ('g', 'git', None, _('use git extended diff format')),
2184 ('s', 'short', None, 'refresh only files already in the patch'),
2189 ('s', 'short', None, 'refresh only files already in the patch'),
2185 ('I', 'include', [], _('include names matching the given patterns')),
2190 ('I', 'include', [], _('include names matching the given patterns')),
2186 ('X', 'exclude', [], _('exclude names matching the given patterns'))
2191 ('X', 'exclude', [], _('exclude names matching the given patterns'))
2187 ] + commands.commitopts,
2192 ] + commands.commitopts,
2188 'hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] FILES...'),
2193 'hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] FILES...'),
2189 'qrename|qmv':
2194 'qrename|qmv':
2190 (rename, [], 'hg qrename PATCH1 [PATCH2]'),
2195 (rename, [], 'hg qrename PATCH1 [PATCH2]'),
2191 "qrestore":
2196 "qrestore":
2192 (restore,
2197 (restore,
2193 [('d', 'delete', None, 'delete save entry'),
2198 [('d', 'delete', None, 'delete save entry'),
2194 ('u', 'update', None, 'update queue working dir')],
2199 ('u', 'update', None, 'update queue working dir')],
2195 'hg qrestore [-d] [-u] REV'),
2200 'hg qrestore [-d] [-u] REV'),
2196 "qsave":
2201 "qsave":
2197 (save,
2202 (save,
2198 [('c', 'copy', None, 'copy patch directory'),
2203 [('c', 'copy', None, 'copy patch directory'),
2199 ('n', 'name', '', 'copy directory name'),
2204 ('n', 'name', '', 'copy directory name'),
2200 ('e', 'empty', None, 'clear queue status file'),
2205 ('e', 'empty', None, 'clear queue status file'),
2201 ('f', 'force', None, 'force copy')] + commands.commitopts,
2206 ('f', 'force', None, 'force copy')] + commands.commitopts,
2202 'hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]'),
2207 'hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]'),
2203 "qselect": (select,
2208 "qselect": (select,
2204 [('n', 'none', None, _('disable all guards')),
2209 [('n', 'none', None, _('disable all guards')),
2205 ('s', 'series', None, _('list all guards in series file')),
2210 ('s', 'series', None, _('list all guards in series file')),
2206 ('', 'pop', None,
2211 ('', 'pop', None,
2207 _('pop to before first guarded applied patch')),
2212 _('pop to before first guarded applied patch')),
2208 ('', 'reapply', None, _('pop, then reapply patches'))],
2213 ('', 'reapply', None, _('pop, then reapply patches'))],
2209 'hg qselect [OPTION...] [GUARD...]'),
2214 'hg qselect [OPTION...] [GUARD...]'),
2210 "qseries":
2215 "qseries":
2211 (series,
2216 (series,
2212 [('m', 'missing', None, 'print patches not in series')] + seriesopts,
2217 [('m', 'missing', None, 'print patches not in series')] + seriesopts,
2213 'hg qseries [-ms]'),
2218 'hg qseries [-ms]'),
2214 "^strip":
2219 "^strip":
2215 (strip,
2220 (strip,
2216 [('f', 'force', None, 'force multi-head removal'),
2221 [('f', 'force', None, 'force multi-head removal'),
2217 ('b', 'backup', None, 'bundle unrelated changesets'),
2222 ('b', 'backup', None, 'bundle unrelated changesets'),
2218 ('n', 'nobackup', None, 'no backups')],
2223 ('n', 'nobackup', None, 'no backups')],
2219 'hg strip [-f] [-b] [-n] REV'),
2224 'hg strip [-f] [-b] [-n] REV'),
2220 "qtop": (top, [] + seriesopts, 'hg qtop [-s]'),
2225 "qtop": (top, [] + seriesopts, 'hg qtop [-s]'),
2221 "qunapplied": (unapplied, [] + seriesopts, 'hg qunapplied [-s] [PATCH]'),
2226 "qunapplied": (unapplied, [] + seriesopts, 'hg qunapplied [-s] [PATCH]'),
2222 }
2227 }
@@ -1,534 +1,534 b''
1 """
1 """
2 dirstate.py - working directory tracking for mercurial
2 dirstate.py - working directory tracking for mercurial
3
3
4 Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
5
5
6 This software may be used and distributed according to the terms
6 This software may be used and distributed according to the terms
7 of the GNU General Public License, incorporated herein by reference.
7 of the GNU General Public License, incorporated herein by reference.
8 """
8 """
9
9
10 from node import *
10 from node import *
11 from i18n import _
11 from i18n import _
12 import struct, os, time, bisect, stat, strutil, util, re, errno
12 import struct, os, time, bisect, stat, strutil, util, re, errno
13
13
14 class dirstate(object):
14 class dirstate(object):
15 format = ">cllll"
15 format = ">cllll"
16
16
17 def __init__(self, opener, ui, root):
17 def __init__(self, opener, ui, root):
18 self.opener = opener
18 self.opener = opener
19 self.root = root
19 self.root = root
20 self.dirty = 0
20 self.dirty = 0
21 self.ui = ui
21 self.ui = ui
22 self.map = None
22 self.map = None
23 self.pl = None
23 self.pl = None
24 self.dirs = None
24 self.dirs = None
25 self.copymap = {}
25 self.copymap = {}
26 self.ignorefunc = None
26 self.ignorefunc = None
27
27
28 def wjoin(self, f):
28 def wjoin(self, f):
29 return os.path.join(self.root, f)
29 return os.path.join(self.root, f)
30
30
31 def getcwd(self):
31 def getcwd(self):
32 cwd = os.getcwd()
32 cwd = os.getcwd()
33 if cwd == self.root: return ''
33 if cwd == self.root: return ''
34 # self.root ends with a path separator if self.root is '/' or 'C:\'
34 # self.root ends with a path separator if self.root is '/' or 'C:\'
35 common_prefix_len = len(self.root)
35 common_prefix_len = len(self.root)
36 if not self.root.endswith(os.sep):
36 if not self.root.endswith(os.sep):
37 common_prefix_len += 1
37 common_prefix_len += 1
38 return cwd[common_prefix_len:]
38 return cwd[common_prefix_len:]
39
39
40 def hgignore(self):
40 def hgignore(self):
41 '''return the contents of .hgignore files as a list of patterns.
41 '''return the contents of .hgignore files as a list of patterns.
42
42
43 the files parsed for patterns include:
43 the files parsed for patterns include:
44 .hgignore in the repository root
44 .hgignore in the repository root
45 any additional files specified in the [ui] section of ~/.hgrc
45 any additional files specified in the [ui] section of ~/.hgrc
46
46
47 trailing white space is dropped.
47 trailing white space is dropped.
48 the escape character is backslash.
48 the escape character is backslash.
49 comments start with #.
49 comments start with #.
50 empty lines are skipped.
50 empty lines are skipped.
51
51
52 lines can be of the following formats:
52 lines can be of the following formats:
53
53
54 syntax: regexp # defaults following lines to non-rooted regexps
54 syntax: regexp # defaults following lines to non-rooted regexps
55 syntax: glob # defaults following lines to non-rooted globs
55 syntax: glob # defaults following lines to non-rooted globs
56 re:pattern # non-rooted regular expression
56 re:pattern # non-rooted regular expression
57 glob:pattern # non-rooted glob
57 glob:pattern # non-rooted glob
58 pattern # pattern of the current default type'''
58 pattern # pattern of the current default type'''
59 syntaxes = {'re': 'relre:', 'regexp': 'relre:', 'glob': 'relglob:'}
59 syntaxes = {'re': 'relre:', 'regexp': 'relre:', 'glob': 'relglob:'}
60 def parselines(fp):
60 def parselines(fp):
61 for line in fp:
61 for line in fp:
62 escape = False
62 escape = False
63 for i in xrange(len(line)):
63 for i in xrange(len(line)):
64 if escape: escape = False
64 if escape: escape = False
65 elif line[i] == '\\': escape = True
65 elif line[i] == '\\': escape = True
66 elif line[i] == '#': break
66 elif line[i] == '#': break
67 line = line[:i].rstrip()
67 line = line[:i].rstrip()
68 if line: yield line
68 if line: yield line
69 repoignore = self.wjoin('.hgignore')
69 repoignore = self.wjoin('.hgignore')
70 files = [repoignore]
70 files = [repoignore]
71 files.extend(self.ui.hgignorefiles())
71 files.extend(self.ui.hgignorefiles())
72 pats = {}
72 pats = {}
73 for f in files:
73 for f in files:
74 try:
74 try:
75 pats[f] = []
75 pats[f] = []
76 fp = open(f)
76 fp = open(f)
77 syntax = 'relre:'
77 syntax = 'relre:'
78 for line in parselines(fp):
78 for line in parselines(fp):
79 if line.startswith('syntax:'):
79 if line.startswith('syntax:'):
80 s = line[7:].strip()
80 s = line[7:].strip()
81 try:
81 try:
82 syntax = syntaxes[s]
82 syntax = syntaxes[s]
83 except KeyError:
83 except KeyError:
84 self.ui.warn(_("%s: ignoring invalid "
84 self.ui.warn(_("%s: ignoring invalid "
85 "syntax '%s'\n") % (f, s))
85 "syntax '%s'\n") % (f, s))
86 continue
86 continue
87 pat = syntax + line
87 pat = syntax + line
88 for s in syntaxes.values():
88 for s in syntaxes.values():
89 if line.startswith(s):
89 if line.startswith(s):
90 pat = line
90 pat = line
91 break
91 break
92 pats[f].append(pat)
92 pats[f].append(pat)
93 except IOError, inst:
93 except IOError, inst:
94 if f != repoignore:
94 if f != repoignore:
95 self.ui.warn(_("skipping unreadable ignore file"
95 self.ui.warn(_("skipping unreadable ignore file"
96 " '%s': %s\n") % (f, inst.strerror))
96 " '%s': %s\n") % (f, inst.strerror))
97 return pats
97 return pats
98
98
99 def ignore(self, fn):
99 def ignore(self, fn):
100 '''default match function used by dirstate and
100 '''default match function used by dirstate and
101 localrepository. this honours the repository .hgignore file
101 localrepository. this honours the repository .hgignore file
102 and any other files specified in the [ui] section of .hgrc.'''
102 and any other files specified in the [ui] section of .hgrc.'''
103 if not self.ignorefunc:
103 if not self.ignorefunc:
104 ignore = self.hgignore()
104 ignore = self.hgignore()
105 allpats = []
105 allpats = []
106 [allpats.extend(patlist) for patlist in ignore.values()]
106 [allpats.extend(patlist) for patlist in ignore.values()]
107 if allpats:
107 if allpats:
108 try:
108 try:
109 files, self.ignorefunc, anypats = (
109 files, self.ignorefunc, anypats = (
110 util.matcher(self.root, inc=allpats, src='.hgignore'))
110 util.matcher(self.root, inc=allpats, src='.hgignore'))
111 except util.Abort:
111 except util.Abort:
112 # Re-raise an exception where the src is the right file
112 # Re-raise an exception where the src is the right file
113 for f, patlist in ignore.items():
113 for f, patlist in ignore.items():
114 files, self.ignorefunc, anypats = (
114 files, self.ignorefunc, anypats = (
115 util.matcher(self.root, inc=patlist, src=f))
115 util.matcher(self.root, inc=patlist, src=f))
116 else:
116 else:
117 self.ignorefunc = util.never
117 self.ignorefunc = util.never
118 return self.ignorefunc(fn)
118 return self.ignorefunc(fn)
119
119
120 def __del__(self):
120 def __del__(self):
121 if self.dirty:
121 if self.dirty:
122 self.write()
122 self.write()
123
123
124 def __getitem__(self, key):
124 def __getitem__(self, key):
125 try:
125 try:
126 return self.map[key]
126 return self.map[key]
127 except TypeError:
127 except TypeError:
128 self.lazyread()
128 self.lazyread()
129 return self[key]
129 return self[key]
130
130
131 def __contains__(self, key):
131 def __contains__(self, key):
132 self.lazyread()
132 self.lazyread()
133 return key in self.map
133 return key in self.map
134
134
135 def parents(self):
135 def parents(self):
136 self.lazyread()
136 self.lazyread()
137 return self.pl
137 return self.pl
138
138
139 def markdirty(self):
139 def markdirty(self):
140 if not self.dirty:
140 if not self.dirty:
141 self.dirty = 1
141 self.dirty = 1
142
142
143 def setparents(self, p1, p2=nullid):
143 def setparents(self, p1, p2=nullid):
144 self.lazyread()
144 self.lazyread()
145 self.markdirty()
145 self.markdirty()
146 self.pl = p1, p2
146 self.pl = p1, p2
147
147
148 def state(self, key):
148 def state(self, key):
149 try:
149 try:
150 return self[key][0]
150 return self[key][0]
151 except KeyError:
151 except KeyError:
152 return "?"
152 return "?"
153
153
154 def lazyread(self):
154 def lazyread(self):
155 if self.map is None:
155 if self.map is None:
156 self.read()
156 self.read()
157
157
158 def parse(self, st):
158 def parse(self, st):
159 self.pl = [st[:20], st[20: 40]]
159 self.pl = [st[:20], st[20: 40]]
160
160
161 # deref fields so they will be local in loop
161 # deref fields so they will be local in loop
162 map = self.map
162 map = self.map
163 copymap = self.copymap
163 copymap = self.copymap
164 format = self.format
164 format = self.format
165 unpack = struct.unpack
165 unpack = struct.unpack
166
166
167 pos = 40
167 pos = 40
168 e_size = struct.calcsize(format)
168 e_size = struct.calcsize(format)
169
169
170 while pos < len(st):
170 while pos < len(st):
171 newpos = pos + e_size
171 newpos = pos + e_size
172 e = unpack(format, st[pos:newpos])
172 e = unpack(format, st[pos:newpos])
173 l = e[4]
173 l = e[4]
174 pos = newpos
174 pos = newpos
175 newpos = pos + l
175 newpos = pos + l
176 f = st[pos:newpos]
176 f = st[pos:newpos]
177 if '\0' in f:
177 if '\0' in f:
178 f, c = f.split('\0')
178 f, c = f.split('\0')
179 copymap[f] = c
179 copymap[f] = c
180 map[f] = e[:4]
180 map[f] = e[:4]
181 pos = newpos
181 pos = newpos
182
182
183 def read(self):
183 def read(self):
184 self.map = {}
184 self.map = {}
185 self.pl = [nullid, nullid]
185 self.pl = [nullid, nullid]
186 try:
186 try:
187 st = self.opener("dirstate").read()
187 st = self.opener("dirstate").read()
188 if st:
188 if st:
189 self.parse(st)
189 self.parse(st)
190 except IOError, err:
190 except IOError, err:
191 if err.errno != errno.ENOENT: raise
191 if err.errno != errno.ENOENT: raise
192
192
193 def copy(self, source, dest):
193 def copy(self, source, dest):
194 self.lazyread()
194 self.lazyread()
195 self.markdirty()
195 self.markdirty()
196 self.copymap[dest] = source
196 self.copymap[dest] = source
197
197
198 def copied(self, file):
198 def copied(self, file):
199 return self.copymap.get(file, None)
199 return self.copymap.get(file, None)
200
200
201 def copies(self):
201 def copies(self):
202 return self.copymap
202 return self.copymap
203
203
204 def initdirs(self):
204 def initdirs(self):
205 if self.dirs is None:
205 if self.dirs is None:
206 self.dirs = {}
206 self.dirs = {}
207 for f in self.map:
207 for f in self.map:
208 self.updatedirs(f, 1)
208 self.updatedirs(f, 1)
209
209
210 def updatedirs(self, path, delta):
210 def updatedirs(self, path, delta):
211 if self.dirs is not None:
211 if self.dirs is not None:
212 for c in strutil.findall(path, '/'):
212 for c in strutil.findall(path, '/'):
213 pc = path[:c]
213 pc = path[:c]
214 self.dirs.setdefault(pc, 0)
214 self.dirs.setdefault(pc, 0)
215 self.dirs[pc] += delta
215 self.dirs[pc] += delta
216
216
217 def checkinterfering(self, files):
217 def checkinterfering(self, files):
218 def prefixes(f):
218 def prefixes(f):
219 for c in strutil.rfindall(f, '/'):
219 for c in strutil.rfindall(f, '/'):
220 yield f[:c]
220 yield f[:c]
221 self.lazyread()
221 self.lazyread()
222 self.initdirs()
222 self.initdirs()
223 seendirs = {}
223 seendirs = {}
224 for f in files:
224 for f in files:
225 # shadows
225 # shadows
226 if self.dirs.get(f):
226 if self.dirs.get(f):
227 raise util.Abort(_('directory named %r already in dirstate') %
227 raise util.Abort(_('directory named %r already in dirstate') %
228 f)
228 f)
229 for d in prefixes(f):
229 for d in prefixes(f):
230 if d in seendirs:
230 if d in seendirs:
231 break
231 break
232 if d in self.map:
232 if d in self.map:
233 raise util.Abort(_('file named %r already in dirstate') %
233 raise util.Abort(_('file named %r already in dirstate') %
234 d)
234 d)
235 seendirs[d] = True
235 seendirs[d] = True
236 # disallowed
236 # disallowed
237 if '\r' in f or '\n' in f:
237 if '\r' in f or '\n' in f:
238 raise util.Abort(_("'\\n' and '\\r' disallowed in filenames"))
238 raise util.Abort(_("'\\n' and '\\r' disallowed in filenames"))
239
239
240 def update(self, files, state, **kw):
240 def update(self, files, state, **kw):
241 ''' current states:
241 ''' current states:
242 n normal
242 n normal
243 m needs merging
243 m needs merging
244 r marked for removal
244 r marked for removal
245 a marked for addition'''
245 a marked for addition'''
246
246
247 if not files: return
247 if not files: return
248 self.lazyread()
248 self.lazyread()
249 self.markdirty()
249 self.markdirty()
250 if state == "a":
250 if state == "a":
251 self.initdirs()
251 self.initdirs()
252 self.checkinterfering(files)
252 self.checkinterfering(files)
253 for f in files:
253 for f in files:
254 if state == "r":
254 if state == "r":
255 self.map[f] = ('r', 0, 0, 0)
255 self.map[f] = ('r', 0, 0, 0)
256 self.updatedirs(f, -1)
256 self.updatedirs(f, -1)
257 else:
257 else:
258 if state == "a":
258 if state == "a":
259 self.updatedirs(f, 1)
259 self.updatedirs(f, 1)
260 s = os.lstat(self.wjoin(f))
260 s = os.lstat(self.wjoin(f))
261 st_size = kw.get('st_size', s.st_size)
261 st_size = kw.get('st_size', s.st_size)
262 st_mtime = kw.get('st_mtime', s.st_mtime)
262 st_mtime = kw.get('st_mtime', s.st_mtime)
263 self.map[f] = (state, s.st_mode, st_size, st_mtime)
263 self.map[f] = (state, s.st_mode, st_size, st_mtime)
264 if self.copymap.has_key(f):
264 if self.copymap.has_key(f):
265 del self.copymap[f]
265 del self.copymap[f]
266
266
267 def forget(self, files):
267 def forget(self, files):
268 if not files: return
268 if not files: return
269 self.lazyread()
269 self.lazyread()
270 self.markdirty()
270 self.markdirty()
271 self.initdirs()
271 self.initdirs()
272 for f in files:
272 for f in files:
273 try:
273 try:
274 del self.map[f]
274 del self.map[f]
275 self.updatedirs(f, -1)
275 self.updatedirs(f, -1)
276 except KeyError:
276 except KeyError:
277 self.ui.warn(_("not in dirstate: %s!\n") % f)
277 self.ui.warn(_("not in dirstate: %s!\n") % f)
278 pass
278 pass
279
279
280 def clear(self):
280 def clear(self):
281 self.map = {}
281 self.map = {}
282 self.copymap = {}
282 self.copymap = {}
283 self.dirs = None
283 self.dirs = None
284 self.markdirty()
284 self.markdirty()
285
285
286 def rebuild(self, parent, files):
286 def rebuild(self, parent, files):
287 self.clear()
287 self.clear()
288 for f in files:
288 for f in files:
289 if files.execf(f):
289 if files.execf(f):
290 self.map[f] = ('n', 0777, -1, 0)
290 self.map[f] = ('n', 0777, -1, 0)
291 else:
291 else:
292 self.map[f] = ('n', 0666, -1, 0)
292 self.map[f] = ('n', 0666, -1, 0)
293 self.pl = (parent, nullid)
293 self.pl = (parent, nullid)
294 self.markdirty()
294 self.markdirty()
295
295
296 def write(self):
296 def write(self):
297 if not self.dirty:
297 if not self.dirty:
298 return
298 return
299 st = self.opener("dirstate", "w", atomic=True)
299 st = self.opener("dirstate", "w", atomic=True)
300 st.write("".join(self.pl))
300 st.write("".join(self.pl))
301 for f, e in self.map.items():
301 for f, e in self.map.items():
302 c = self.copied(f)
302 c = self.copied(f)
303 if c:
303 if c:
304 f = f + "\0" + c
304 f = f + "\0" + c
305 e = struct.pack(self.format, e[0], e[1], e[2], e[3], len(f))
305 e = struct.pack(self.format, e[0], e[1], e[2], e[3], len(f))
306 st.write(e + f)
306 st.write(e + f)
307 self.dirty = 0
307 self.dirty = 0
308
308
309 def filterfiles(self, files):
309 def filterfiles(self, files):
310 ret = {}
310 ret = {}
311 unknown = []
311 unknown = []
312
312
313 for x in files:
313 for x in files:
314 if x == '.':
314 if x == '.':
315 return self.map.copy()
315 return self.map.copy()
316 if x not in self.map:
316 if x not in self.map:
317 unknown.append(x)
317 unknown.append(x)
318 else:
318 else:
319 ret[x] = self.map[x]
319 ret[x] = self.map[x]
320
320
321 if not unknown:
321 if not unknown:
322 return ret
322 return ret
323
323
324 b = self.map.keys()
324 b = self.map.keys()
325 b.sort()
325 b.sort()
326 blen = len(b)
326 blen = len(b)
327
327
328 for x in unknown:
328 for x in unknown:
329 bs = bisect.bisect(b, "%s%s" % (x, '/'))
329 bs = bisect.bisect(b, "%s%s" % (x, '/'))
330 while bs < blen:
330 while bs < blen:
331 s = b[bs]
331 s = b[bs]
332 if len(s) > len(x) and s.startswith(x):
332 if len(s) > len(x) and s.startswith(x):
333 ret[s] = self.map[s]
333 ret[s] = self.map[s]
334 else:
334 else:
335 break
335 break
336 bs += 1
336 bs += 1
337 return ret
337 return ret
338
338
339 def supported_type(self, f, st, verbose=False):
339 def supported_type(self, f, st, verbose=False):
340 if stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode):
340 if stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode):
341 return True
341 return True
342 if verbose:
342 if verbose:
343 kind = 'unknown'
343 kind = 'unknown'
344 if stat.S_ISCHR(st.st_mode): kind = _('character device')
344 if stat.S_ISCHR(st.st_mode): kind = _('character device')
345 elif stat.S_ISBLK(st.st_mode): kind = _('block device')
345 elif stat.S_ISBLK(st.st_mode): kind = _('block device')
346 elif stat.S_ISFIFO(st.st_mode): kind = _('fifo')
346 elif stat.S_ISFIFO(st.st_mode): kind = _('fifo')
347 elif stat.S_ISSOCK(st.st_mode): kind = _('socket')
347 elif stat.S_ISSOCK(st.st_mode): kind = _('socket')
348 elif stat.S_ISDIR(st.st_mode): kind = _('directory')
348 elif stat.S_ISDIR(st.st_mode): kind = _('directory')
349 self.ui.warn(_('%s: unsupported file type (type is %s)\n') % (
349 self.ui.warn(_('%s: unsupported file type (type is %s)\n') % (
350 util.pathto(self.getcwd(), f),
350 util.pathto(self.getcwd(), f),
351 kind))
351 kind))
352 return False
352 return False
353
353
354 def walk(self, files=None, match=util.always, badmatch=None):
354 def walk(self, files=None, match=util.always, badmatch=None):
355 # filter out the stat
355 # filter out the stat
356 for src, f, st in self.statwalk(files, match, badmatch=badmatch):
356 for src, f, st in self.statwalk(files, match, badmatch=badmatch):
357 yield src, f
357 yield src, f
358
358
359 def statwalk(self, files=None, match=util.always, ignored=False,
359 def statwalk(self, files=None, match=util.always, ignored=False,
360 badmatch=None, directories=False):
360 badmatch=None, directories=False):
361 '''
361 '''
362 walk recursively through the directory tree, finding all files
362 walk recursively through the directory tree, finding all files
363 matched by the match function
363 matched by the match function
364
364
365 results are yielded in a tuple (src, filename, st), where src
365 results are yielded in a tuple (src, filename, st), where src
366 is one of:
366 is one of:
367 'f' the file was found in the directory tree
367 'f' the file was found in the directory tree
368 'd' the file is a directory of the tree
368 'd' the file is a directory of the tree
369 'm' the file was only in the dirstate and not in the tree
369 'm' the file was only in the dirstate and not in the tree
370 'b' file was not found and matched badmatch
370 'b' file was not found and matched badmatch
371
371
372 and st is the stat result if the file was found in the directory.
372 and st is the stat result if the file was found in the directory.
373 '''
373 '''
374 self.lazyread()
374 self.lazyread()
375
375
376 # walk all files by default
376 # walk all files by default
377 if not files:
377 if not files:
378 files = [self.root]
378 files = ['.']
379 dc = self.map.copy()
379 dc = self.map.copy()
380 else:
380 else:
381 files = util.unique(files)
381 files = util.unique(files)
382 dc = self.filterfiles(files)
382 dc = self.filterfiles(files)
383
383
384 def imatch(file_):
384 def imatch(file_):
385 if file_ not in dc and self.ignore(file_):
385 if file_ not in dc and self.ignore(file_):
386 return False
386 return False
387 return match(file_)
387 return match(file_)
388
388
389 if ignored: imatch = match
389 if ignored: imatch = match
390
390
391 # self.root may end with a path separator when self.root == '/'
391 # self.root may end with a path separator when self.root == '/'
392 common_prefix_len = len(self.root)
392 common_prefix_len = len(self.root)
393 if not self.root.endswith(os.sep):
393 if not self.root.endswith(os.sep):
394 common_prefix_len += 1
394 common_prefix_len += 1
395 # recursion free walker, faster than os.walk.
395 # recursion free walker, faster than os.walk.
396 def findfiles(s):
396 def findfiles(s):
397 work = [s]
397 work = [s]
398 if directories:
398 if directories:
399 yield 'd', util.normpath(s[common_prefix_len:]), os.lstat(s)
399 yield 'd', util.normpath(s[common_prefix_len:]), os.lstat(s)
400 while work:
400 while work:
401 top = work.pop()
401 top = work.pop()
402 names = os.listdir(top)
402 names = os.listdir(top)
403 names.sort()
403 names.sort()
404 # nd is the top of the repository dir tree
404 # nd is the top of the repository dir tree
405 nd = util.normpath(top[common_prefix_len:])
405 nd = util.normpath(top[common_prefix_len:])
406 if nd == '.':
406 if nd == '.':
407 nd = ''
407 nd = ''
408 else:
408 else:
409 # do not recurse into a repo contained in this
409 # do not recurse into a repo contained in this
410 # one. use bisect to find .hg directory so speed
410 # one. use bisect to find .hg directory so speed
411 # is good on big directory.
411 # is good on big directory.
412 hg = bisect.bisect_left(names, '.hg')
412 hg = bisect.bisect_left(names, '.hg')
413 if hg < len(names) and names[hg] == '.hg':
413 if hg < len(names) and names[hg] == '.hg':
414 if os.path.isdir(os.path.join(top, '.hg')):
414 if os.path.isdir(os.path.join(top, '.hg')):
415 continue
415 continue
416 for f in names:
416 for f in names:
417 np = util.pconvert(os.path.join(nd, f))
417 np = util.pconvert(os.path.join(nd, f))
418 if seen(np):
418 if seen(np):
419 continue
419 continue
420 p = os.path.join(top, f)
420 p = os.path.join(top, f)
421 # don't trip over symlinks
421 # don't trip over symlinks
422 st = os.lstat(p)
422 st = os.lstat(p)
423 if stat.S_ISDIR(st.st_mode):
423 if stat.S_ISDIR(st.st_mode):
424 ds = util.pconvert(os.path.join(nd, f +'/'))
424 ds = util.pconvert(os.path.join(nd, f +'/'))
425 if imatch(ds):
425 if imatch(ds):
426 work.append(p)
426 work.append(p)
427 if directories:
427 if directories:
428 yield 'd', np, st
428 yield 'd', np, st
429 if imatch(np) and np in dc:
429 if imatch(np) and np in dc:
430 yield 'm', np, st
430 yield 'm', np, st
431 elif imatch(np):
431 elif imatch(np):
432 if self.supported_type(np, st):
432 if self.supported_type(np, st):
433 yield 'f', np, st
433 yield 'f', np, st
434 elif np in dc:
434 elif np in dc:
435 yield 'm', np, st
435 yield 'm', np, st
436
436
437 known = {'.hg': 1}
437 known = {'.hg': 1}
438 def seen(fn):
438 def seen(fn):
439 if fn in known: return True
439 if fn in known: return True
440 known[fn] = 1
440 known[fn] = 1
441
441
442 # step one, find all files that match our criteria
442 # step one, find all files that match our criteria
443 files.sort()
443 files.sort()
444 for ff in files:
444 for ff in files:
445 nf = util.normpath(ff)
445 nf = util.normpath(ff)
446 f = self.wjoin(ff)
446 f = self.wjoin(ff)
447 try:
447 try:
448 st = os.lstat(f)
448 st = os.lstat(f)
449 except OSError, inst:
449 except OSError, inst:
450 found = False
450 found = False
451 for fn in dc:
451 for fn in dc:
452 if nf == fn or (fn.startswith(nf) and fn[len(nf)] == '/'):
452 if nf == fn or (fn.startswith(nf) and fn[len(nf)] == '/'):
453 found = True
453 found = True
454 break
454 break
455 if not found:
455 if not found:
456 if inst.errno != errno.ENOENT or not badmatch:
456 if inst.errno != errno.ENOENT or not badmatch:
457 self.ui.warn('%s: %s\n' % (
457 self.ui.warn('%s: %s\n' % (
458 util.pathto(self.getcwd(), ff),
458 util.pathto(self.getcwd(), ff),
459 inst.strerror))
459 inst.strerror))
460 elif badmatch and badmatch(ff) and imatch(nf):
460 elif badmatch and badmatch(ff) and imatch(nf):
461 yield 'b', ff, None
461 yield 'b', ff, None
462 continue
462 continue
463 if stat.S_ISDIR(st.st_mode):
463 if stat.S_ISDIR(st.st_mode):
464 cmp1 = (lambda x, y: cmp(x[1], y[1]))
464 cmp1 = (lambda x, y: cmp(x[1], y[1]))
465 sorted_ = [ x for x in findfiles(f) ]
465 sorted_ = [ x for x in findfiles(f) ]
466 sorted_.sort(cmp1)
466 sorted_.sort(cmp1)
467 for e in sorted_:
467 for e in sorted_:
468 yield e
468 yield e
469 else:
469 else:
470 if not seen(nf) and match(nf):
470 if not seen(nf) and match(nf):
471 if self.supported_type(ff, st, verbose=True):
471 if self.supported_type(ff, st, verbose=True):
472 yield 'f', nf, st
472 yield 'f', nf, st
473 elif ff in dc:
473 elif ff in dc:
474 yield 'm', nf, st
474 yield 'm', nf, st
475
475
476 # step two run through anything left in the dc hash and yield
476 # step two run through anything left in the dc hash and yield
477 # if we haven't already seen it
477 # if we haven't already seen it
478 ks = dc.keys()
478 ks = dc.keys()
479 ks.sort()
479 ks.sort()
480 for k in ks:
480 for k in ks:
481 if not seen(k) and imatch(k):
481 if not seen(k) and imatch(k):
482 yield 'm', k, None
482 yield 'm', k, None
483
483
484 def status(self, files=None, match=util.always, list_ignored=False,
484 def status(self, files=None, match=util.always, list_ignored=False,
485 list_clean=False):
485 list_clean=False):
486 lookup, modified, added, unknown, ignored = [], [], [], [], []
486 lookup, modified, added, unknown, ignored = [], [], [], [], []
487 removed, deleted, clean = [], [], []
487 removed, deleted, clean = [], [], []
488
488
489 for src, fn, st in self.statwalk(files, match, ignored=list_ignored):
489 for src, fn, st in self.statwalk(files, match, ignored=list_ignored):
490 try:
490 try:
491 type_, mode, size, time = self[fn]
491 type_, mode, size, time = self[fn]
492 except KeyError:
492 except KeyError:
493 if list_ignored and self.ignore(fn):
493 if list_ignored and self.ignore(fn):
494 ignored.append(fn)
494 ignored.append(fn)
495 else:
495 else:
496 unknown.append(fn)
496 unknown.append(fn)
497 continue
497 continue
498 if src == 'm':
498 if src == 'm':
499 nonexistent = True
499 nonexistent = True
500 if not st:
500 if not st:
501 try:
501 try:
502 st = os.lstat(self.wjoin(fn))
502 st = os.lstat(self.wjoin(fn))
503 except OSError, inst:
503 except OSError, inst:
504 if inst.errno != errno.ENOENT:
504 if inst.errno != errno.ENOENT:
505 raise
505 raise
506 st = None
506 st = None
507 # We need to re-check that it is a valid file
507 # We need to re-check that it is a valid file
508 if st and self.supported_type(fn, st):
508 if st and self.supported_type(fn, st):
509 nonexistent = False
509 nonexistent = False
510 # XXX: what to do with file no longer present in the fs
510 # XXX: what to do with file no longer present in the fs
511 # who are not removed in the dirstate ?
511 # who are not removed in the dirstate ?
512 if nonexistent and type_ in "nm":
512 if nonexistent and type_ in "nm":
513 deleted.append(fn)
513 deleted.append(fn)
514 continue
514 continue
515 # check the common case first
515 # check the common case first
516 if type_ == 'n':
516 if type_ == 'n':
517 if not st:
517 if not st:
518 st = os.lstat(self.wjoin(fn))
518 st = os.lstat(self.wjoin(fn))
519 if size >= 0 and (size != st.st_size
519 if size >= 0 and (size != st.st_size
520 or (mode ^ st.st_mode) & 0100):
520 or (mode ^ st.st_mode) & 0100):
521 modified.append(fn)
521 modified.append(fn)
522 elif time != int(st.st_mtime):
522 elif time != int(st.st_mtime):
523 lookup.append(fn)
523 lookup.append(fn)
524 elif list_clean:
524 elif list_clean:
525 clean.append(fn)
525 clean.append(fn)
526 elif type_ == 'm':
526 elif type_ == 'm':
527 modified.append(fn)
527 modified.append(fn)
528 elif type_ == 'a':
528 elif type_ == 'a':
529 added.append(fn)
529 added.append(fn)
530 elif type_ == 'r':
530 elif type_ == 'r':
531 removed.append(fn)
531 removed.append(fn)
532
532
533 return (lookup, modified, added, removed, deleted, unknown, ignored,
533 return (lookup, modified, added, removed, deleted, unknown, ignored,
534 clean)
534 clean)
@@ -1,1949 +1,1949 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import _
9 from i18n import _
10 import repo, appendfile, changegroup
10 import repo, appendfile, changegroup
11 import changelog, dirstate, filelog, manifest, context
11 import changelog, dirstate, filelog, manifest, context
12 import re, lock, transaction, tempfile, stat, mdiff, errno, ui
12 import re, lock, transaction, tempfile, stat, mdiff, errno, ui
13 import os, revlog, time, util
13 import os, revlog, time, util
14
14
15 class localrepository(repo.repository):
15 class localrepository(repo.repository):
16 capabilities = ('lookup', 'changegroupsubset')
16 capabilities = ('lookup', 'changegroupsubset')
17 supported = ('revlogv1', 'store')
17 supported = ('revlogv1', 'store')
18 branchcache_features = ('unnamed',)
18 branchcache_features = ('unnamed',)
19
19
20 def __del__(self):
20 def __del__(self):
21 self.transhandle = None
21 self.transhandle = None
22 def __init__(self, parentui, path=None, create=0):
22 def __init__(self, parentui, path=None, create=0):
23 repo.repository.__init__(self)
23 repo.repository.__init__(self)
24 if not path:
24 if not path:
25 p = os.getcwd()
25 p = os.getcwd()
26 while not os.path.isdir(os.path.join(p, ".hg")):
26 while not os.path.isdir(os.path.join(p, ".hg")):
27 oldp = p
27 oldp = p
28 p = os.path.dirname(p)
28 p = os.path.dirname(p)
29 if p == oldp:
29 if p == oldp:
30 raise repo.RepoError(_("There is no Mercurial repository"
30 raise repo.RepoError(_("There is no Mercurial repository"
31 " here (.hg not found)"))
31 " here (.hg not found)"))
32 path = p
32 path = p
33
33
34 self.path = os.path.join(path, ".hg")
35 self.root = os.path.realpath(path)
34 self.root = os.path.realpath(path)
35 self.path = os.path.join(self.root, ".hg")
36 self.origroot = path
36 self.origroot = path
37 self.opener = util.opener(self.path)
37 self.opener = util.opener(self.path)
38 self.wopener = util.opener(self.root)
38 self.wopener = util.opener(self.root)
39
39
40 if not os.path.isdir(self.path):
40 if not os.path.isdir(self.path):
41 if create:
41 if create:
42 if not os.path.exists(path):
42 if not os.path.exists(path):
43 os.mkdir(path)
43 os.mkdir(path)
44 os.mkdir(self.path)
44 os.mkdir(self.path)
45 requirements = ["revlogv1"]
45 requirements = ["revlogv1"]
46 if parentui.configbool('format', 'usestore', True):
46 if parentui.configbool('format', 'usestore', True):
47 os.mkdir(os.path.join(self.path, "store"))
47 os.mkdir(os.path.join(self.path, "store"))
48 requirements.append("store")
48 requirements.append("store")
49 # create an invalid changelog
49 # create an invalid changelog
50 self.opener("00changelog.i", "a").write(
50 self.opener("00changelog.i", "a").write(
51 '\0\0\0\2' # represents revlogv2
51 '\0\0\0\2' # represents revlogv2
52 ' dummy changelog to prevent using the old repo layout'
52 ' dummy changelog to prevent using the old repo layout'
53 )
53 )
54 reqfile = self.opener("requires", "w")
54 reqfile = self.opener("requires", "w")
55 for r in requirements:
55 for r in requirements:
56 reqfile.write("%s\n" % r)
56 reqfile.write("%s\n" % r)
57 reqfile.close()
57 reqfile.close()
58 else:
58 else:
59 raise repo.RepoError(_("repository %s not found") % path)
59 raise repo.RepoError(_("repository %s not found") % path)
60 elif create:
60 elif create:
61 raise repo.RepoError(_("repository %s already exists") % path)
61 raise repo.RepoError(_("repository %s already exists") % path)
62 else:
62 else:
63 # find requirements
63 # find requirements
64 try:
64 try:
65 requirements = self.opener("requires").read().splitlines()
65 requirements = self.opener("requires").read().splitlines()
66 except IOError, inst:
66 except IOError, inst:
67 if inst.errno != errno.ENOENT:
67 if inst.errno != errno.ENOENT:
68 raise
68 raise
69 requirements = []
69 requirements = []
70 # check them
70 # check them
71 for r in requirements:
71 for r in requirements:
72 if r not in self.supported:
72 if r not in self.supported:
73 raise repo.RepoError(_("requirement '%s' not supported") % r)
73 raise repo.RepoError(_("requirement '%s' not supported") % r)
74
74
75 # setup store
75 # setup store
76 if "store" in requirements:
76 if "store" in requirements:
77 self.encodefn = util.encodefilename
77 self.encodefn = util.encodefilename
78 self.decodefn = util.decodefilename
78 self.decodefn = util.decodefilename
79 self.spath = os.path.join(self.path, "store")
79 self.spath = os.path.join(self.path, "store")
80 else:
80 else:
81 self.encodefn = lambda x: x
81 self.encodefn = lambda x: x
82 self.decodefn = lambda x: x
82 self.decodefn = lambda x: x
83 self.spath = self.path
83 self.spath = self.path
84 self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)
84 self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)
85
85
86 self.ui = ui.ui(parentui=parentui)
86 self.ui = ui.ui(parentui=parentui)
87 try:
87 try:
88 self.ui.readconfig(self.join("hgrc"), self.root)
88 self.ui.readconfig(self.join("hgrc"), self.root)
89 except IOError:
89 except IOError:
90 pass
90 pass
91
91
92 v = self.ui.configrevlog()
92 v = self.ui.configrevlog()
93 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
93 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
94 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
94 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
95 fl = v.get('flags', None)
95 fl = v.get('flags', None)
96 flags = 0
96 flags = 0
97 if fl != None:
97 if fl != None:
98 for x in fl.split():
98 for x in fl.split():
99 flags |= revlog.flagstr(x)
99 flags |= revlog.flagstr(x)
100 elif self.revlogv1:
100 elif self.revlogv1:
101 flags = revlog.REVLOG_DEFAULT_FLAGS
101 flags = revlog.REVLOG_DEFAULT_FLAGS
102
102
103 v = self.revlogversion | flags
103 v = self.revlogversion | flags
104 self.manifest = manifest.manifest(self.sopener, v)
104 self.manifest = manifest.manifest(self.sopener, v)
105 self.changelog = changelog.changelog(self.sopener, v)
105 self.changelog = changelog.changelog(self.sopener, v)
106
106
107 fallback = self.ui.config('ui', 'fallbackencoding')
107 fallback = self.ui.config('ui', 'fallbackencoding')
108 if fallback:
108 if fallback:
109 util._fallbackencoding = fallback
109 util._fallbackencoding = fallback
110
110
111 # the changelog might not have the inline index flag
111 # the changelog might not have the inline index flag
112 # on. If the format of the changelog is the same as found in
112 # on. If the format of the changelog is the same as found in
113 # .hgrc, apply any flags found in the .hgrc as well.
113 # .hgrc, apply any flags found in the .hgrc as well.
114 # Otherwise, just version from the changelog
114 # Otherwise, just version from the changelog
115 v = self.changelog.version
115 v = self.changelog.version
116 if v == self.revlogversion:
116 if v == self.revlogversion:
117 v |= flags
117 v |= flags
118 self.revlogversion = v
118 self.revlogversion = v
119
119
120 self.tagscache = None
120 self.tagscache = None
121 self.branchcache = None
121 self.branchcache = None
122 self.nodetagscache = None
122 self.nodetagscache = None
123 self.filterpats = {}
123 self.filterpats = {}
124 self.transhandle = None
124 self.transhandle = None
125
125
126 self._link = lambda x: False
126 self._link = lambda x: False
127 if util.checklink(self.root):
127 if util.checklink(self.root):
128 r = self.root # avoid circular reference in lambda
128 r = self.root # avoid circular reference in lambda
129 self._link = lambda x: util.is_link(os.path.join(r, x))
129 self._link = lambda x: util.is_link(os.path.join(r, x))
130
130
131 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
131 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
132
132
133 def url(self):
133 def url(self):
134 return 'file:' + self.root
134 return 'file:' + self.root
135
135
136 def hook(self, name, throw=False, **args):
136 def hook(self, name, throw=False, **args):
137 def callhook(hname, funcname):
137 def callhook(hname, funcname):
138 '''call python hook. hook is callable object, looked up as
138 '''call python hook. hook is callable object, looked up as
139 name in python module. if callable returns "true", hook
139 name in python module. if callable returns "true", hook
140 fails, else passes. if hook raises exception, treated as
140 fails, else passes. if hook raises exception, treated as
141 hook failure. exception propagates if throw is "true".
141 hook failure. exception propagates if throw is "true".
142
142
143 reason for "true" meaning "hook failed" is so that
143 reason for "true" meaning "hook failed" is so that
144 unmodified commands (e.g. mercurial.commands.update) can
144 unmodified commands (e.g. mercurial.commands.update) can
145 be run as hooks without wrappers to convert return values.'''
145 be run as hooks without wrappers to convert return values.'''
146
146
147 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
147 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
148 obj = funcname
148 obj = funcname
149 if not callable(obj):
149 if not callable(obj):
150 d = funcname.rfind('.')
150 d = funcname.rfind('.')
151 if d == -1:
151 if d == -1:
152 raise util.Abort(_('%s hook is invalid ("%s" not in '
152 raise util.Abort(_('%s hook is invalid ("%s" not in '
153 'a module)') % (hname, funcname))
153 'a module)') % (hname, funcname))
154 modname = funcname[:d]
154 modname = funcname[:d]
155 try:
155 try:
156 obj = __import__(modname)
156 obj = __import__(modname)
157 except ImportError:
157 except ImportError:
158 try:
158 try:
159 # extensions are loaded with hgext_ prefix
159 # extensions are loaded with hgext_ prefix
160 obj = __import__("hgext_%s" % modname)
160 obj = __import__("hgext_%s" % modname)
161 except ImportError:
161 except ImportError:
162 raise util.Abort(_('%s hook is invalid '
162 raise util.Abort(_('%s hook is invalid '
163 '(import of "%s" failed)') %
163 '(import of "%s" failed)') %
164 (hname, modname))
164 (hname, modname))
165 try:
165 try:
166 for p in funcname.split('.')[1:]:
166 for p in funcname.split('.')[1:]:
167 obj = getattr(obj, p)
167 obj = getattr(obj, p)
168 except AttributeError, err:
168 except AttributeError, err:
169 raise util.Abort(_('%s hook is invalid '
169 raise util.Abort(_('%s hook is invalid '
170 '("%s" is not defined)') %
170 '("%s" is not defined)') %
171 (hname, funcname))
171 (hname, funcname))
172 if not callable(obj):
172 if not callable(obj):
173 raise util.Abort(_('%s hook is invalid '
173 raise util.Abort(_('%s hook is invalid '
174 '("%s" is not callable)') %
174 '("%s" is not callable)') %
175 (hname, funcname))
175 (hname, funcname))
176 try:
176 try:
177 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
177 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
178 except (KeyboardInterrupt, util.SignalInterrupt):
178 except (KeyboardInterrupt, util.SignalInterrupt):
179 raise
179 raise
180 except Exception, exc:
180 except Exception, exc:
181 if isinstance(exc, util.Abort):
181 if isinstance(exc, util.Abort):
182 self.ui.warn(_('error: %s hook failed: %s\n') %
182 self.ui.warn(_('error: %s hook failed: %s\n') %
183 (hname, exc.args[0]))
183 (hname, exc.args[0]))
184 else:
184 else:
185 self.ui.warn(_('error: %s hook raised an exception: '
185 self.ui.warn(_('error: %s hook raised an exception: '
186 '%s\n') % (hname, exc))
186 '%s\n') % (hname, exc))
187 if throw:
187 if throw:
188 raise
188 raise
189 self.ui.print_exc()
189 self.ui.print_exc()
190 return True
190 return True
191 if r:
191 if r:
192 if throw:
192 if throw:
193 raise util.Abort(_('%s hook failed') % hname)
193 raise util.Abort(_('%s hook failed') % hname)
194 self.ui.warn(_('warning: %s hook failed\n') % hname)
194 self.ui.warn(_('warning: %s hook failed\n') % hname)
195 return r
195 return r
196
196
197 def runhook(name, cmd):
197 def runhook(name, cmd):
198 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
198 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
199 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
199 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
200 r = util.system(cmd, environ=env, cwd=self.root)
200 r = util.system(cmd, environ=env, cwd=self.root)
201 if r:
201 if r:
202 desc, r = util.explain_exit(r)
202 desc, r = util.explain_exit(r)
203 if throw:
203 if throw:
204 raise util.Abort(_('%s hook %s') % (name, desc))
204 raise util.Abort(_('%s hook %s') % (name, desc))
205 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
205 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
206 return r
206 return r
207
207
208 r = False
208 r = False
209 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
209 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
210 if hname.split(".", 1)[0] == name and cmd]
210 if hname.split(".", 1)[0] == name and cmd]
211 hooks.sort()
211 hooks.sort()
212 for hname, cmd in hooks:
212 for hname, cmd in hooks:
213 if callable(cmd):
213 if callable(cmd):
214 r = callhook(hname, cmd) or r
214 r = callhook(hname, cmd) or r
215 elif cmd.startswith('python:'):
215 elif cmd.startswith('python:'):
216 r = callhook(hname, cmd[7:].strip()) or r
216 r = callhook(hname, cmd[7:].strip()) or r
217 else:
217 else:
218 r = runhook(hname, cmd) or r
218 r = runhook(hname, cmd) or r
219 return r
219 return r
220
220
221 tag_disallowed = ':\r\n'
221 tag_disallowed = ':\r\n'
222
222
223 def _tag(self, name, node, message, local, user, date, parent=None):
223 def _tag(self, name, node, message, local, user, date, parent=None):
224 use_dirstate = parent is None
224 use_dirstate = parent is None
225
225
226 for c in self.tag_disallowed:
226 for c in self.tag_disallowed:
227 if c in name:
227 if c in name:
228 raise util.Abort(_('%r cannot be used in a tag name') % c)
228 raise util.Abort(_('%r cannot be used in a tag name') % c)
229
229
230 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
230 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
231
231
232 if local:
232 if local:
233 # local tags are stored in the current charset
233 # local tags are stored in the current charset
234 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
234 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
235 self.hook('tag', node=hex(node), tag=name, local=local)
235 self.hook('tag', node=hex(node), tag=name, local=local)
236 return
236 return
237
237
238 # committed tags are stored in UTF-8
238 # committed tags are stored in UTF-8
239 line = '%s %s\n' % (hex(node), util.fromlocal(name))
239 line = '%s %s\n' % (hex(node), util.fromlocal(name))
240 if use_dirstate:
240 if use_dirstate:
241 self.wfile('.hgtags', 'ab').write(line)
241 self.wfile('.hgtags', 'ab').write(line)
242 else:
242 else:
243 ntags = self.filectx('.hgtags', parent).data()
243 ntags = self.filectx('.hgtags', parent).data()
244 self.wfile('.hgtags', 'ab').write(ntags + line)
244 self.wfile('.hgtags', 'ab').write(ntags + line)
245 if use_dirstate and self.dirstate.state('.hgtags') == '?':
245 if use_dirstate and self.dirstate.state('.hgtags') == '?':
246 self.add(['.hgtags'])
246 self.add(['.hgtags'])
247
247
248 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent)
248 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent)
249
249
250 self.hook('tag', node=hex(node), tag=name, local=local)
250 self.hook('tag', node=hex(node), tag=name, local=local)
251
251
252 return tagnode
252 return tagnode
253
253
254 def tag(self, name, node, message, local, user, date):
254 def tag(self, name, node, message, local, user, date):
255 '''tag a revision with a symbolic name.
255 '''tag a revision with a symbolic name.
256
256
257 if local is True, the tag is stored in a per-repository file.
257 if local is True, the tag is stored in a per-repository file.
258 otherwise, it is stored in the .hgtags file, and a new
258 otherwise, it is stored in the .hgtags file, and a new
259 changeset is committed with the change.
259 changeset is committed with the change.
260
260
261 keyword arguments:
261 keyword arguments:
262
262
263 local: whether to store tag in non-version-controlled file
263 local: whether to store tag in non-version-controlled file
264 (default False)
264 (default False)
265
265
266 message: commit message to use if committing
266 message: commit message to use if committing
267
267
268 user: name of user to use if committing
268 user: name of user to use if committing
269
269
270 date: date tuple to use if committing'''
270 date: date tuple to use if committing'''
271
271
272 for x in self.status()[:5]:
272 for x in self.status()[:5]:
273 if '.hgtags' in x:
273 if '.hgtags' in x:
274 raise util.Abort(_('working copy of .hgtags is changed '
274 raise util.Abort(_('working copy of .hgtags is changed '
275 '(please commit .hgtags manually)'))
275 '(please commit .hgtags manually)'))
276
276
277
277
278 self._tag(name, node, message, local, user, date)
278 self._tag(name, node, message, local, user, date)
279
279
280 def tags(self):
280 def tags(self):
281 '''return a mapping of tag to node'''
281 '''return a mapping of tag to node'''
282 if not self.tagscache:
282 if not self.tagscache:
283 self.tagscache = {}
283 self.tagscache = {}
284
284
285 def parsetag(line, context):
285 def parsetag(line, context):
286 if not line:
286 if not line:
287 return
287 return
288 s = l.split(" ", 1)
288 s = l.split(" ", 1)
289 if len(s) != 2:
289 if len(s) != 2:
290 self.ui.warn(_("%s: cannot parse entry\n") % context)
290 self.ui.warn(_("%s: cannot parse entry\n") % context)
291 return
291 return
292 node, key = s
292 node, key = s
293 key = util.tolocal(key.strip()) # stored in UTF-8
293 key = util.tolocal(key.strip()) # stored in UTF-8
294 try:
294 try:
295 bin_n = bin(node)
295 bin_n = bin(node)
296 except TypeError:
296 except TypeError:
297 self.ui.warn(_("%s: node '%s' is not well formed\n") %
297 self.ui.warn(_("%s: node '%s' is not well formed\n") %
298 (context, node))
298 (context, node))
299 return
299 return
300 if bin_n not in self.changelog.nodemap:
300 if bin_n not in self.changelog.nodemap:
301 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
301 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
302 (context, key))
302 (context, key))
303 return
303 return
304 self.tagscache[key] = bin_n
304 self.tagscache[key] = bin_n
305
305
306 # read the tags file from each head, ending with the tip,
306 # read the tags file from each head, ending with the tip,
307 # and add each tag found to the map, with "newer" ones
307 # and add each tag found to the map, with "newer" ones
308 # taking precedence
308 # taking precedence
309 f = None
309 f = None
310 for rev, node, fnode in self._hgtagsnodes():
310 for rev, node, fnode in self._hgtagsnodes():
311 f = (f and f.filectx(fnode) or
311 f = (f and f.filectx(fnode) or
312 self.filectx('.hgtags', fileid=fnode))
312 self.filectx('.hgtags', fileid=fnode))
313 count = 0
313 count = 0
314 for l in f.data().splitlines():
314 for l in f.data().splitlines():
315 count += 1
315 count += 1
316 parsetag(l, _("%s, line %d") % (str(f), count))
316 parsetag(l, _("%s, line %d") % (str(f), count))
317
317
318 try:
318 try:
319 f = self.opener("localtags")
319 f = self.opener("localtags")
320 count = 0
320 count = 0
321 for l in f:
321 for l in f:
322 # localtags are stored in the local character set
322 # localtags are stored in the local character set
323 # while the internal tag table is stored in UTF-8
323 # while the internal tag table is stored in UTF-8
324 l = util.fromlocal(l)
324 l = util.fromlocal(l)
325 count += 1
325 count += 1
326 parsetag(l, _("localtags, line %d") % count)
326 parsetag(l, _("localtags, line %d") % count)
327 except IOError:
327 except IOError:
328 pass
328 pass
329
329
330 self.tagscache['tip'] = self.changelog.tip()
330 self.tagscache['tip'] = self.changelog.tip()
331
331
332 return self.tagscache
332 return self.tagscache
333
333
334 def _hgtagsnodes(self):
334 def _hgtagsnodes(self):
335 heads = self.heads()
335 heads = self.heads()
336 heads.reverse()
336 heads.reverse()
337 last = {}
337 last = {}
338 ret = []
338 ret = []
339 for node in heads:
339 for node in heads:
340 c = self.changectx(node)
340 c = self.changectx(node)
341 rev = c.rev()
341 rev = c.rev()
342 try:
342 try:
343 fnode = c.filenode('.hgtags')
343 fnode = c.filenode('.hgtags')
344 except revlog.LookupError:
344 except revlog.LookupError:
345 continue
345 continue
346 ret.append((rev, node, fnode))
346 ret.append((rev, node, fnode))
347 if fnode in last:
347 if fnode in last:
348 ret[last[fnode]] = None
348 ret[last[fnode]] = None
349 last[fnode] = len(ret) - 1
349 last[fnode] = len(ret) - 1
350 return [item for item in ret if item]
350 return [item for item in ret if item]
351
351
352 def tagslist(self):
352 def tagslist(self):
353 '''return a list of tags ordered by revision'''
353 '''return a list of tags ordered by revision'''
354 l = []
354 l = []
355 for t, n in self.tags().items():
355 for t, n in self.tags().items():
356 try:
356 try:
357 r = self.changelog.rev(n)
357 r = self.changelog.rev(n)
358 except:
358 except:
359 r = -2 # sort to the beginning of the list if unknown
359 r = -2 # sort to the beginning of the list if unknown
360 l.append((r, t, n))
360 l.append((r, t, n))
361 l.sort()
361 l.sort()
362 return [(t, n) for r, t, n in l]
362 return [(t, n) for r, t, n in l]
363
363
364 def nodetags(self, node):
364 def nodetags(self, node):
365 '''return the tags associated with a node'''
365 '''return the tags associated with a node'''
366 if not self.nodetagscache:
366 if not self.nodetagscache:
367 self.nodetagscache = {}
367 self.nodetagscache = {}
368 for t, n in self.tags().items():
368 for t, n in self.tags().items():
369 self.nodetagscache.setdefault(n, []).append(t)
369 self.nodetagscache.setdefault(n, []).append(t)
370 return self.nodetagscache.get(node, [])
370 return self.nodetagscache.get(node, [])
371
371
372 def _branchtags(self):
372 def _branchtags(self):
373 partial, last, lrev = self._readbranchcache()
373 partial, last, lrev = self._readbranchcache()
374
374
375 tiprev = self.changelog.count() - 1
375 tiprev = self.changelog.count() - 1
376 if lrev != tiprev:
376 if lrev != tiprev:
377 self._updatebranchcache(partial, lrev+1, tiprev+1)
377 self._updatebranchcache(partial, lrev+1, tiprev+1)
378 self._writebranchcache(partial, self.changelog.tip(), tiprev)
378 self._writebranchcache(partial, self.changelog.tip(), tiprev)
379
379
380 return partial
380 return partial
381
381
382 def branchtags(self):
382 def branchtags(self):
383 if self.branchcache is not None:
383 if self.branchcache is not None:
384 return self.branchcache
384 return self.branchcache
385
385
386 self.branchcache = {} # avoid recursion in changectx
386 self.branchcache = {} # avoid recursion in changectx
387 partial = self._branchtags()
387 partial = self._branchtags()
388
388
389 # the branch cache is stored on disk as UTF-8, but in the local
389 # the branch cache is stored on disk as UTF-8, but in the local
390 # charset internally
390 # charset internally
391 for k, v in partial.items():
391 for k, v in partial.items():
392 self.branchcache[util.tolocal(k)] = v
392 self.branchcache[util.tolocal(k)] = v
393 return self.branchcache
393 return self.branchcache
394
394
    def _readbranchcache(self):
        """Load .hg/branches.cache from disk.

        Returns (partial, last, lrev) where partial maps branch name
        (UTF-8, as stored) to node, and last/lrev identify the tip the
        cache was valid for.  Any parse or validation failure falls
        through to an empty result ({}, nullid, nullrev) so the cache
        is simply rebuilt.
        """
        partial = {}
        try:
            f = self.opener("branches.cache")
            lines = f.read().split('\n')
            f.close()
            # first line declares the feature set this cache was written with
            features = lines.pop(0).strip()
            if not features.startswith('features: '):
                raise ValueError(_('branch cache: no features specified'))
            features = features.split(' ', 1)[1].split()
            missing_features = []
            # every feature we require must be present...
            for feature in self.branchcache_features:
                try:
                    features.remove(feature)
                except ValueError, inst:
                    missing_features.append(feature)
            if missing_features:
                raise ValueError(_('branch cache: missing features: %s')
                                 % ', '.join(missing_features))
            # ...and nothing we don't understand may remain
            if features:
                raise ValueError(_('branch cache: unknown features: %s')
                                 % ', '.join(features))
            # second line: "<tiphex> <tiprev>" the cache was computed at
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if not (lrev < self.changelog.count() and
                    self.changelog.node(lrev) == last): # sanity check
                # invalidate the cache
                raise ValueError('Invalid branch cache: unknown tip')
            # remaining lines: "<nodehex> <branchname>"
            for l in lines:
                if not l: continue
                node, label = l.split(" ", 1)
                partial[label.strip()] = bin(node)
        except (KeyboardInterrupt, util.SignalInterrupt):
            raise
        except Exception, inst:
            # a broken cache is not fatal; report only when debugging
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
434
434
435 def _writebranchcache(self, branches, tip, tiprev):
435 def _writebranchcache(self, branches, tip, tiprev):
436 try:
436 try:
437 f = self.opener("branches.cache", "w")
437 f = self.opener("branches.cache", "w")
438 f.write(" features: %s\n" % ' '.join(self.branchcache_features))
438 f.write(" features: %s\n" % ' '.join(self.branchcache_features))
439 f.write("%s %s\n" % (hex(tip), tiprev))
439 f.write("%s %s\n" % (hex(tip), tiprev))
440 for label, node in branches.iteritems():
440 for label, node in branches.iteritems():
441 f.write("%s %s\n" % (hex(node), label))
441 f.write("%s %s\n" % (hex(node), label))
442 except IOError:
442 except IOError:
443 pass
443 pass
444
444
445 def _updatebranchcache(self, partial, start, end):
445 def _updatebranchcache(self, partial, start, end):
446 for r in xrange(start, end):
446 for r in xrange(start, end):
447 c = self.changectx(r)
447 c = self.changectx(r)
448 b = c.branch()
448 b = c.branch()
449 partial[b] = c.node()
449 partial[b] = c.node()
450
450
451 def lookup(self, key):
451 def lookup(self, key):
452 if key == '.':
452 if key == '.':
453 key = self.dirstate.parents()[0]
453 key = self.dirstate.parents()[0]
454 if key == nullid:
454 if key == nullid:
455 raise repo.RepoError(_("no revision checked out"))
455 raise repo.RepoError(_("no revision checked out"))
456 elif key == 'null':
456 elif key == 'null':
457 return nullid
457 return nullid
458 n = self.changelog._match(key)
458 n = self.changelog._match(key)
459 if n:
459 if n:
460 return n
460 return n
461 if key in self.tags():
461 if key in self.tags():
462 return self.tags()[key]
462 return self.tags()[key]
463 if key in self.branchtags():
463 if key in self.branchtags():
464 return self.branchtags()[key]
464 return self.branchtags()[key]
465 n = self.changelog._partialmatch(key)
465 n = self.changelog._partialmatch(key)
466 if n:
466 if n:
467 return n
467 return n
468 raise repo.RepoError(_("unknown revision '%s'") % key)
468 raise repo.RepoError(_("unknown revision '%s'") % key)
469
469
470 def dev(self):
470 def dev(self):
471 return os.lstat(self.path).st_dev
471 return os.lstat(self.path).st_dev
472
472
473 def local(self):
473 def local(self):
474 return True
474 return True
475
475
476 def join(self, f):
476 def join(self, f):
477 return os.path.join(self.path, f)
477 return os.path.join(self.path, f)
478
478
479 def sjoin(self, f):
479 def sjoin(self, f):
480 f = self.encodefn(f)
480 f = self.encodefn(f)
481 return os.path.join(self.spath, f)
481 return os.path.join(self.spath, f)
482
482
483 def wjoin(self, f):
483 def wjoin(self, f):
484 return os.path.join(self.root, f)
484 return os.path.join(self.root, f)
485
485
486 def file(self, f):
486 def file(self, f):
487 if f[0] == '/':
487 if f[0] == '/':
488 f = f[1:]
488 f = f[1:]
489 return filelog.filelog(self.sopener, f, self.revlogversion)
489 return filelog.filelog(self.sopener, f, self.revlogversion)
490
490
491 def changectx(self, changeid=None):
491 def changectx(self, changeid=None):
492 return context.changectx(self, changeid)
492 return context.changectx(self, changeid)
493
493
494 def workingctx(self):
494 def workingctx(self):
495 return context.workingctx(self)
495 return context.workingctx(self)
496
496
497 def parents(self, changeid=None):
497 def parents(self, changeid=None):
498 '''
498 '''
499 get list of changectxs for parents of changeid or working directory
499 get list of changectxs for parents of changeid or working directory
500 '''
500 '''
501 if changeid is None:
501 if changeid is None:
502 pl = self.dirstate.parents()
502 pl = self.dirstate.parents()
503 else:
503 else:
504 n = self.changelog.lookup(changeid)
504 n = self.changelog.lookup(changeid)
505 pl = self.changelog.parents(n)
505 pl = self.changelog.parents(n)
506 if pl[1] == nullid:
506 if pl[1] == nullid:
507 return [self.changectx(pl[0])]
507 return [self.changectx(pl[0])]
508 return [self.changectx(pl[0]), self.changectx(pl[1])]
508 return [self.changectx(pl[0]), self.changectx(pl[1])]
509
509
510 def filectx(self, path, changeid=None, fileid=None):
510 def filectx(self, path, changeid=None, fileid=None):
511 """changeid can be a changeset revision, node, or tag.
511 """changeid can be a changeset revision, node, or tag.
512 fileid can be a file revision or node."""
512 fileid can be a file revision or node."""
513 return context.filectx(self, path, changeid, fileid)
513 return context.filectx(self, path, changeid, fileid)
514
514
515 def getcwd(self):
515 def getcwd(self):
516 return self.dirstate.getcwd()
516 return self.dirstate.getcwd()
517
517
518 def wfile(self, f, mode='r'):
518 def wfile(self, f, mode='r'):
519 return self.wopener(f, mode)
519 return self.wopener(f, mode)
520
520
521 def _filter(self, filter, filename, data):
521 def _filter(self, filter, filename, data):
522 if filter not in self.filterpats:
522 if filter not in self.filterpats:
523 l = []
523 l = []
524 for pat, cmd in self.ui.configitems(filter):
524 for pat, cmd in self.ui.configitems(filter):
525 mf = util.matcher(self.root, "", [pat], [], [])[1]
525 mf = util.matcher(self.root, "", [pat], [], [])[1]
526 l.append((mf, cmd))
526 l.append((mf, cmd))
527 self.filterpats[filter] = l
527 self.filterpats[filter] = l
528
528
529 for mf, cmd in self.filterpats[filter]:
529 for mf, cmd in self.filterpats[filter]:
530 if mf(filename):
530 if mf(filename):
531 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
531 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
532 data = util.filter(data, cmd)
532 data = util.filter(data, cmd)
533 break
533 break
534
534
535 return data
535 return data
536
536
537 def wread(self, filename):
537 def wread(self, filename):
538 if self._link(filename):
538 if self._link(filename):
539 data = os.readlink(self.wjoin(filename))
539 data = os.readlink(self.wjoin(filename))
540 else:
540 else:
541 data = self.wopener(filename, 'r').read()
541 data = self.wopener(filename, 'r').read()
542 return self._filter("encode", filename, data)
542 return self._filter("encode", filename, data)
543
543
544 def wwrite(self, filename, data, flags):
544 def wwrite(self, filename, data, flags):
545 data = self._filter("decode", filename, data)
545 data = self._filter("decode", filename, data)
546 if "l" in flags:
546 if "l" in flags:
547 f = self.wjoin(filename)
547 f = self.wjoin(filename)
548 try:
548 try:
549 os.unlink(f)
549 os.unlink(f)
550 except OSError:
550 except OSError:
551 pass
551 pass
552 d = os.path.dirname(f)
552 d = os.path.dirname(f)
553 if not os.path.exists(d):
553 if not os.path.exists(d):
554 os.makedirs(d)
554 os.makedirs(d)
555 os.symlink(data, f)
555 os.symlink(data, f)
556 else:
556 else:
557 try:
557 try:
558 if self._link(filename):
558 if self._link(filename):
559 os.unlink(self.wjoin(filename))
559 os.unlink(self.wjoin(filename))
560 except OSError:
560 except OSError:
561 pass
561 pass
562 self.wopener(filename, 'w').write(data)
562 self.wopener(filename, 'w').write(data)
563 util.set_exec(self.wjoin(filename), "x" in flags)
563 util.set_exec(self.wjoin(filename), "x" in flags)
564
564
565 def wwritedata(self, filename, data):
565 def wwritedata(self, filename, data):
566 return self._filter("decode", filename, data)
566 return self._filter("decode", filename, data)
567
567
    def transaction(self):
        """Open (or nest into) a transaction on the store.

        If a transaction is already running, return a nested handle so
        the outermost caller controls commit/abort.  Otherwise snapshot
        the dirstate to journal.dirstate (for rollback) and start a new
        transaction; on close the journal files are renamed to the undo
        files via aftertrans.
        """
        tr = self.transhandle
        if tr != None and tr.running():
            return tr.nest()

        # save dirstate for rollback; a missing dirstate just means an
        # empty one
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        # when the transaction closes, journal files become undo files
        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames))
        self.transhandle = tr
        return tr
587
587
588 def recover(self):
588 def recover(self):
589 l = self.lock()
589 l = self.lock()
590 if os.path.exists(self.sjoin("journal")):
590 if os.path.exists(self.sjoin("journal")):
591 self.ui.status(_("rolling back interrupted transaction\n"))
591 self.ui.status(_("rolling back interrupted transaction\n"))
592 transaction.rollback(self.sopener, self.sjoin("journal"))
592 transaction.rollback(self.sopener, self.sjoin("journal"))
593 self.reload()
593 self.reload()
594 return True
594 return True
595 else:
595 else:
596 self.ui.warn(_("no interrupted transaction available\n"))
596 self.ui.warn(_("no interrupted transaction available\n"))
597 return False
597 return False
598
598
599 def rollback(self, wlock=None):
599 def rollback(self, wlock=None):
600 if not wlock:
600 if not wlock:
601 wlock = self.wlock()
601 wlock = self.wlock()
602 l = self.lock()
602 l = self.lock()
603 if os.path.exists(self.sjoin("undo")):
603 if os.path.exists(self.sjoin("undo")):
604 self.ui.status(_("rolling back last transaction\n"))
604 self.ui.status(_("rolling back last transaction\n"))
605 transaction.rollback(self.sopener, self.sjoin("undo"))
605 transaction.rollback(self.sopener, self.sjoin("undo"))
606 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
606 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
607 self.reload()
607 self.reload()
608 self.wreload()
608 self.wreload()
609 else:
609 else:
610 self.ui.warn(_("no rollback information available\n"))
610 self.ui.warn(_("no rollback information available\n"))
611
611
612 def wreload(self):
612 def wreload(self):
613 self.dirstate.read()
613 self.dirstate.read()
614
614
615 def reload(self):
615 def reload(self):
616 self.changelog.load()
616 self.changelog.load()
617 self.manifest.load()
617 self.manifest.load()
618 self.tagscache = None
618 self.tagscache = None
619 self.nodetagscache = None
619 self.nodetagscache = None
620
620
    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
                desc=None):
        """Acquire the lock file lockname.

        First tries a non-blocking acquire.  If the lock is held and
        wait is true, warns who holds it and retries with the
        configured ui.timeout (default 600s); if wait is false the
        LockHeld exception propagates.  acquirefn, when given, runs
        after the lock is obtained; releasefn is passed through to the
        lock object.  Returns the lock (released when it is garbage
        collected or released explicitly).
        """
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
636
636
637 def lock(self, wait=1):
637 def lock(self, wait=1):
638 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
638 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
639 desc=_('repository %s') % self.origroot)
639 desc=_('repository %s') % self.origroot)
640
640
641 def wlock(self, wait=1):
641 def wlock(self, wait=1):
642 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
642 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
643 self.wreload,
643 self.wreload,
644 desc=_('working directory of %s') % self.origroot)
644 desc=_('working directory of %s') % self.origroot)
645
645
    def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
        """
        commit an individual file as part of a larger transaction

        fn: file name; manifest1/manifest2: the parents' manifests;
        linkrev: changelog revision this filelog entry will link to;
        changelist: list of changed files, appended to when a new
        filelog entry is actually written.  Returns the filelog node
        for the file's new (or unchanged) state.
        """

        t = self.wread(fn)
        fl = self.file(fn)
        # file's parents: the file node in each manifest, nullid if absent
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                 should record that bar descends from
            #                 bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            elif fp1 != nullid: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = fp1
            else: # directory rename
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            # first parent is nullid: "look up the copy data"
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t):
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, transaction, linkrev, fp1, fp2)
705
705
706 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None, extra={}):
706 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None, extra={}):
707 if p1 is None:
707 if p1 is None:
708 p1, p2 = self.dirstate.parents()
708 p1, p2 = self.dirstate.parents()
709 return self.commit(files=files, text=text, user=user, date=date,
709 return self.commit(files=files, text=text, user=user, date=date,
710 p1=p1, p2=p2, wlock=wlock, extra=extra)
710 p1=p1, p2=p2, wlock=wlock, extra=extra)
711
711
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, lock=None, wlock=None,
               force_editor=False, p1=None, p2=None, extra={}):
        """Create a new changeset and return its node, or None when
        nothing was committed.

        files: explicit file list; None/empty means "everything status()
        reports" (dirstate path only).  text: commit message; an editor
        is spawned when it is empty or force_editor is set.  p1/p2:
        explicit parents (the rawcommit path); p1 is None means "use the
        dirstate parents".  extra: extra changeset metadata (copied
        immediately, so the mutable default is not shared in practice).
        """

        commit = []
        remove = []
        changed = []
        use_dirstate = (p1 is None) # not rawcommit
        extra = extra.copy()

        # decide what to commit and what to remove
        if use_dirstate:
            if files:
                for f in files:
                    s = self.dirstate.state(f)
                    if s in 'nmai':
                        commit.append(f)
                    elif s == 'r':
                        remove.append(f)
                    else:
                        self.ui.warn(_("%s not tracked!\n") % f)
            else:
                changes = self.status(match=match)[:5]
                modified, added, removed, deleted, unknown = changes
                commit = modified + added
                remove = removed
        else:
            commit = files

        if use_dirstate:
            p1, p2 = self.dirstate.parents()
            update_dirstate = True
        else:
            p1, p2 = p1, p2 or nullid
            # only move the dirstate forward if it was already at p1
            update_dirstate = (self.dirstate.parents()[0] == p1)

        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0]).copy()
        m2 = self.manifest.read(c2[0])

        if use_dirstate:
            branchname = self.workingctx().branch()
            try:
                # validate that the branch name is well-formed UTF-8
                branchname = branchname.decode('UTF-8').encode('UTF-8')
            except UnicodeDecodeError:
                raise util.Abort(_('branch name not in UTF-8!'))
        else:
            branchname = ""

        if use_dirstate:
            oldname = c1[5].get("branch", "") # stored in UTF-8
            # an empty commit is only allowed with --force, a merge, or
            # a branch name change
            if not commit and not remove and not force and p2 == nullid and \
               branchname == oldname:
                self.ui.status(_("nothing changed\n"))
                return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        is_exec = util.execfunc(self.root, m1.execf)
        is_link = util.linkfunc(self.root, m1.linkf)
        for f in commit:
            self.ui.note(f + "\n")
            try:
                new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
                m1.set(f, is_exec(f), is_link(f))
            except (OSError, IOError):
                # rawcommit tolerates missing files by treating them as
                # removals; a normal commit aborts
                if use_dirstate:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                else:
                    remove.append(f)

        # update manifest
        m1.update(new)
        remove.sort()
        removed = []

        for f in remove:
            if f in m1:
                del m1[f]
                removed.append(f)
        mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, removed))

        # add changeset
        new = new.keys()
        new.sort()

        user = user or self.ui.username()
        if not text or force_editor:
            # build the editor template: HG: lines are stripped later
            edittext = []
            if text:
                edittext.append(text)
            edittext.append("")
            edittext.append("HG: user: %s" % user)
            if p2 != nullid:
                edittext.append("HG: branch merge")
            if branchname:
                edittext.append("HG: branch %s" % util.tolocal(branchname))
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in removed])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            text = self.ui.edit("\n".join(edittext), user)
            os.chdir(olddir)

        # normalize the message: strip trailing whitespace and leading
        # blank lines; an empty message aborts the commit
        lines = [line.rstrip() for line in text.rstrip().splitlines()]
        while lines and not lines[0]:
            del lines[0]
        if not lines:
            return None
        text = '\n'.join(lines)
        if branchname:
            extra["branch"] = branchname
        n = self.changelog.add(mn, changed + removed, text, tr, p1, p2,
                               user, date, extra)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        # keep the in-memory branch cache in sync with the new head
        if self.branchcache and "branch" in extra:
            self.branchcache[util.tolocal(extra["branch"])] = n

        if use_dirstate or update_dirstate:
            self.dirstate.setparents(n)
            if use_dirstate:
                self.dirstate.update(new, "n")
                self.dirstate.forget(removed)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n
860
860
    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function

        results are yielded in a tuple (src, filename), where src
        is one of:
        'f'  the file was found in the directory tree
        'm'  the file was only in the dirstate and not in the tree
        'b'  file was not found and matched badmatch

        NOTE: files is a mutable default argument; it is only read
        here, never mutated, so the sharing is harmless.
        '''

        if node:
            # walk a committed changeset: check every manifest entry
            fdict = dict.fromkeys(files)
            for fn in self.manifest.read(self.changelog.read(node)[0]):
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        # deleting during iteration is safe only because
                        # we break out of the loop immediately afterwards
                        del fdict[ffn]
                        break
                if match(fn):
                    yield 'm', fn
            # whatever is left in fdict was requested but not found
            for fn in fdict:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n') % (
                        util.pathto(self.getcwd(), fn), short(node)))
        else:
            # walk the working directory via the dirstate
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn
894
894
895 def status(self, node1=None, node2=None, files=[], match=util.always,
895 def status(self, node1=None, node2=None, files=[], match=util.always,
896 wlock=None, list_ignored=False, list_clean=False):
896 wlock=None, list_ignored=False, list_clean=False):
897 """return status of files between two nodes or node and working directory
897 """return status of files between two nodes or node and working directory
898
898
899 If node1 is None, use the first dirstate parent instead.
899 If node1 is None, use the first dirstate parent instead.
900 If node2 is None, compare node1 with working directory.
900 If node2 is None, compare node1 with working directory.
901 """
901 """
902
902
903 def fcmp(fn, getnode):
903 def fcmp(fn, getnode):
904 t1 = self.wread(fn)
904 t1 = self.wread(fn)
905 return self.file(fn).cmp(getnode(fn), t1)
905 return self.file(fn).cmp(getnode(fn), t1)
906
906
907 def mfmatches(node):
907 def mfmatches(node):
908 change = self.changelog.read(node)
908 change = self.changelog.read(node)
909 mf = self.manifest.read(change[0]).copy()
909 mf = self.manifest.read(change[0]).copy()
910 for fn in mf.keys():
910 for fn in mf.keys():
911 if not match(fn):
911 if not match(fn):
912 del mf[fn]
912 del mf[fn]
913 return mf
913 return mf
914
914
915 modified, added, removed, deleted, unknown = [], [], [], [], []
915 modified, added, removed, deleted, unknown = [], [], [], [], []
916 ignored, clean = [], []
916 ignored, clean = [], []
917
917
918 compareworking = False
918 compareworking = False
919 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
919 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
920 compareworking = True
920 compareworking = True
921
921
922 if not compareworking:
922 if not compareworking:
923 # read the manifest from node1 before the manifest from node2,
923 # read the manifest from node1 before the manifest from node2,
924 # so that we'll hit the manifest cache if we're going through
924 # so that we'll hit the manifest cache if we're going through
925 # all the revisions in parent->child order.
925 # all the revisions in parent->child order.
926 mf1 = mfmatches(node1)
926 mf1 = mfmatches(node1)
927
927
928 # are we comparing the working directory?
928 # are we comparing the working directory?
929 if not node2:
929 if not node2:
930 if not wlock:
930 if not wlock:
931 try:
931 try:
932 wlock = self.wlock(wait=0)
932 wlock = self.wlock(wait=0)
933 except lock.LockException:
933 except lock.LockException:
934 wlock = None
934 wlock = None
935 (lookup, modified, added, removed, deleted, unknown,
935 (lookup, modified, added, removed, deleted, unknown,
936 ignored, clean) = self.dirstate.status(files, match,
936 ignored, clean) = self.dirstate.status(files, match,
937 list_ignored, list_clean)
937 list_ignored, list_clean)
938
938
939 # are we comparing working dir against its parent?
939 # are we comparing working dir against its parent?
940 if compareworking:
940 if compareworking:
941 if lookup:
941 if lookup:
942 # do a full compare of any files that might have changed
942 # do a full compare of any files that might have changed
943 mnode = self.changelog.read(self.dirstate.parents()[0])[0]
943 mnode = self.changelog.read(self.dirstate.parents()[0])[0]
944 getnode = lambda fn: (self.manifest.find(mnode, fn)[0] or
944 getnode = lambda fn: (self.manifest.find(mnode, fn)[0] or
945 nullid)
945 nullid)
946 for f in lookup:
946 for f in lookup:
947 if fcmp(f, getnode):
947 if fcmp(f, getnode):
948 modified.append(f)
948 modified.append(f)
949 else:
949 else:
950 clean.append(f)
950 clean.append(f)
951 if wlock is not None:
951 if wlock is not None:
952 self.dirstate.update([f], "n")
952 self.dirstate.update([f], "n")
953 else:
953 else:
954 # we are comparing working dir against non-parent
954 # we are comparing working dir against non-parent
955 # generate a pseudo-manifest for the working dir
955 # generate a pseudo-manifest for the working dir
956 # XXX: create it in dirstate.py ?
956 # XXX: create it in dirstate.py ?
957 mf2 = mfmatches(self.dirstate.parents()[0])
957 mf2 = mfmatches(self.dirstate.parents()[0])
958 is_exec = util.execfunc(self.root, mf2.execf)
958 is_exec = util.execfunc(self.root, mf2.execf)
959 is_link = util.linkfunc(self.root, mf2.linkf)
959 is_link = util.linkfunc(self.root, mf2.linkf)
960 for f in lookup + modified + added:
960 for f in lookup + modified + added:
961 mf2[f] = ""
961 mf2[f] = ""
962 mf2.set(f, is_exec(f), is_link(f))
962 mf2.set(f, is_exec(f), is_link(f))
963 for f in removed:
963 for f in removed:
964 if f in mf2:
964 if f in mf2:
965 del mf2[f]
965 del mf2[f]
966 else:
966 else:
967 # we are comparing two revisions
967 # we are comparing two revisions
968 mf2 = mfmatches(node2)
968 mf2 = mfmatches(node2)
969
969
970 if not compareworking:
970 if not compareworking:
971 # flush lists from dirstate before comparing manifests
971 # flush lists from dirstate before comparing manifests
972 modified, added, clean = [], [], []
972 modified, added, clean = [], [], []
973
973
974 # make sure to sort the files so we talk to the disk in a
974 # make sure to sort the files so we talk to the disk in a
975 # reasonable order
975 # reasonable order
976 mf2keys = mf2.keys()
976 mf2keys = mf2.keys()
977 mf2keys.sort()
977 mf2keys.sort()
978 getnode = lambda fn: mf1.get(fn, nullid)
978 getnode = lambda fn: mf1.get(fn, nullid)
979 for fn in mf2keys:
979 for fn in mf2keys:
980 if mf1.has_key(fn):
980 if mf1.has_key(fn):
981 if mf1.flags(fn) != mf2.flags(fn) or \
981 if mf1.flags(fn) != mf2.flags(fn) or \
982 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or
982 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or
983 fcmp(fn, getnode))):
983 fcmp(fn, getnode))):
984 modified.append(fn)
984 modified.append(fn)
985 elif list_clean:
985 elif list_clean:
986 clean.append(fn)
986 clean.append(fn)
987 del mf1[fn]
987 del mf1[fn]
988 else:
988 else:
989 added.append(fn)
989 added.append(fn)
990
990
991 removed = mf1.keys()
991 removed = mf1.keys()
992
992
993 # sort and return results:
993 # sort and return results:
994 for l in modified, added, removed, deleted, unknown, ignored, clean:
994 for l in modified, added, removed, deleted, unknown, ignored, clean:
995 l.sort()
995 l.sort()
996 return (modified, added, removed, deleted, unknown, ignored, clean)
996 return (modified, added, removed, deleted, unknown, ignored, clean)
997
997
    def add(self, list, wlock=None):
        """Schedule the files in 'list' for addition at the next commit.

        Files that do not exist, are not regular files or symlinks, or
        are already tracked are skipped with a warning.  'list' holds
        repository-relative paths; 'wlock' is an optional pre-acquired
        working-directory lock (one is taken here if not supplied).
        """
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            # check islink separately: a dangling symlink should still be
            # addable even though os.path.exists() reports False for it
            islink = os.path.islink(p)
            if not islink and not os.path.exists(p):
                self.ui.warn(_("%s does not exist!\n") % f)
            elif not islink and not os.path.isfile(p):
                self.ui.warn(_("%s not added: only files and symlinks "
                               "supported currently\n") % f)
            elif self.dirstate.state(f) in 'an':
                # 'a' = already scheduled for add, 'n' = already tracked
                self.ui.warn(_("%s already tracked!\n") % f)
            else:
                self.dirstate.update([f], "a")
1013
1013
1014 def forget(self, list, wlock=None):
1014 def forget(self, list, wlock=None):
1015 if not wlock:
1015 if not wlock:
1016 wlock = self.wlock()
1016 wlock = self.wlock()
1017 for f in list:
1017 for f in list:
1018 if self.dirstate.state(f) not in 'ai':
1018 if self.dirstate.state(f) not in 'ai':
1019 self.ui.warn(_("%s not added!\n") % f)
1019 self.ui.warn(_("%s not added!\n") % f)
1020 else:
1020 else:
1021 self.dirstate.forget([f])
1021 self.dirstate.forget([f])
1022
1022
    def remove(self, list, unlink=False, wlock=None):
        """Schedule files for removal at the next commit.

        If 'unlink' is True the files are also deleted from the working
        directory (already-missing files are silently ignored).  'wlock'
        is an optional pre-acquired working-directory lock.
        """
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    # already gone is fine; anything else is a real error
                    if inst.errno != errno.ENOENT:
                        raise
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if os.path.exists(p):
                # without unlink, the file must already be gone from the
                # working directory before it can be marked as removed
                self.ui.warn(_("%s still exists!\n") % f)
            elif self.dirstate.state(f) == 'a':
                # never committed: just drop the pending add
                self.dirstate.forget([f])
            elif f not in self.dirstate:
                self.ui.warn(_("%s not tracked!\n") % f)
            else:
                self.dirstate.update([f], "r")
1043
1043
1044 def undelete(self, list, wlock=None):
1044 def undelete(self, list, wlock=None):
1045 p = self.dirstate.parents()[0]
1045 p = self.dirstate.parents()[0]
1046 mn = self.changelog.read(p)[0]
1046 mn = self.changelog.read(p)[0]
1047 m = self.manifest.read(mn)
1047 m = self.manifest.read(mn)
1048 if not wlock:
1048 if not wlock:
1049 wlock = self.wlock()
1049 wlock = self.wlock()
1050 for f in list:
1050 for f in list:
1051 if self.dirstate.state(f) not in "r":
1051 if self.dirstate.state(f) not in "r":
1052 self.ui.warn("%s not removed!\n" % f)
1052 self.ui.warn("%s not removed!\n" % f)
1053 else:
1053 else:
1054 t = self.file(f).read(m[f])
1054 t = self.file(f).read(m[f])
1055 self.wwrite(f, t, m.flags(f))
1055 self.wwrite(f, t, m.flags(f))
1056 self.dirstate.update([f], "n")
1056 self.dirstate.update([f], "n")
1057
1057
    def copy(self, source, dest, wlock=None):
        """Record in the dirstate that 'dest' is a copy of 'source'.

        'dest' must already exist in the working directory as a regular
        file; if it is not yet tracked it is scheduled for add first.
        """
        p = self.wjoin(dest)
        if not os.path.exists(p):
            self.ui.warn(_("%s does not exist!\n") % dest)
        elif not os.path.isfile(p):
            self.ui.warn(_("copy failed: %s is not a file\n") % dest)
        else:
            if not wlock:
                wlock = self.wlock()
            # '?' means untracked: schedule the add before recording the copy
            if self.dirstate.state(dest) == '?':
                self.dirstate.update([dest], "a")
            self.dirstate.copy(source, dest)
1070
1070
1071 def heads(self, start=None):
1071 def heads(self, start=None):
1072 heads = self.changelog.heads(start)
1072 heads = self.changelog.heads(start)
1073 # sort the output in rev descending order
1073 # sort the output in rev descending order
1074 heads = [(-self.changelog.rev(h), h) for h in heads]
1074 heads = [(-self.changelog.rev(h), h) for h in heads]
1075 heads.sort()
1075 heads.sort()
1076 return [n for (r, n) in heads]
1076 return [n for (r, n) in heads]
1077
1077
    def branches(self, nodes):
        """For each node, walk first parents back to a branch boundary.

        Returns a list of (head, root, parent1, parent2) tuples where
        'root' is the first ancestor of 'head' that is either a merge
        (two parents) or a root changeset.  Defaults to the tip when
        'nodes' is empty.  Used by the discovery protocol.
        """
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                # stop at a merge (second parent set) or a root changeset
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b
1091
1091
    def between(self, pairs):
        """Sample nodes along linear ranges of history.

        For each (top, bottom) pair, walk first parents from 'top' down
        towards 'bottom' and collect the nodes at exponentially growing
        distances (1, 2, 4, ...) from 'top'.  The discovery protocol
        uses these samples to binary-search for common ancestors.
        """
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom:
                p = self.changelog.parents(n)[0]
                if i == f:
                    # reached the next sample distance: record the node
                    # and double the gap to the following sample
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
1110
1110
1111 def findincoming(self, remote, base=None, heads=None, force=False):
1111 def findincoming(self, remote, base=None, heads=None, force=False):
1112 """Return list of roots of the subsets of missing nodes from remote
1112 """Return list of roots of the subsets of missing nodes from remote
1113
1113
1114 If base dict is specified, assume that these nodes and their parents
1114 If base dict is specified, assume that these nodes and their parents
1115 exist on the remote side and that no child of a node of base exists
1115 exist on the remote side and that no child of a node of base exists
1116 in both remote and self.
1116 in both remote and self.
1117 Furthermore base will be updated to include the nodes that exists
1117 Furthermore base will be updated to include the nodes that exists
1118 in self and remote but no children exists in self and remote.
1118 in self and remote but no children exists in self and remote.
1119 If a list of heads is specified, return only nodes which are heads
1119 If a list of heads is specified, return only nodes which are heads
1120 or ancestors of these heads.
1120 or ancestors of these heads.
1121
1121
1122 All the ancestors of base are in self and in remote.
1122 All the ancestors of base are in self and in remote.
1123 All the descendants of the list returned are missing in self.
1123 All the descendants of the list returned are missing in self.
1124 (and so we know that the rest of the nodes are missing in remote, see
1124 (and so we know that the rest of the nodes are missing in remote, see
1125 outgoing)
1125 outgoing)
1126 """
1126 """
1127 m = self.changelog.nodemap
1127 m = self.changelog.nodemap
1128 search = []
1128 search = []
1129 fetch = {}
1129 fetch = {}
1130 seen = {}
1130 seen = {}
1131 seenbranch = {}
1131 seenbranch = {}
1132 if base == None:
1132 if base == None:
1133 base = {}
1133 base = {}
1134
1134
1135 if not heads:
1135 if not heads:
1136 heads = remote.heads()
1136 heads = remote.heads()
1137
1137
1138 if self.changelog.tip() == nullid:
1138 if self.changelog.tip() == nullid:
1139 base[nullid] = 1
1139 base[nullid] = 1
1140 if heads != [nullid]:
1140 if heads != [nullid]:
1141 return [nullid]
1141 return [nullid]
1142 return []
1142 return []
1143
1143
1144 # assume we're closer to the tip than the root
1144 # assume we're closer to the tip than the root
1145 # and start by examining the heads
1145 # and start by examining the heads
1146 self.ui.status(_("searching for changes\n"))
1146 self.ui.status(_("searching for changes\n"))
1147
1147
1148 unknown = []
1148 unknown = []
1149 for h in heads:
1149 for h in heads:
1150 if h not in m:
1150 if h not in m:
1151 unknown.append(h)
1151 unknown.append(h)
1152 else:
1152 else:
1153 base[h] = 1
1153 base[h] = 1
1154
1154
1155 if not unknown:
1155 if not unknown:
1156 return []
1156 return []
1157
1157
1158 req = dict.fromkeys(unknown)
1158 req = dict.fromkeys(unknown)
1159 reqcnt = 0
1159 reqcnt = 0
1160
1160
1161 # search through remote branches
1161 # search through remote branches
1162 # a 'branch' here is a linear segment of history, with four parts:
1162 # a 'branch' here is a linear segment of history, with four parts:
1163 # head, root, first parent, second parent
1163 # head, root, first parent, second parent
1164 # (a branch always has two parents (or none) by definition)
1164 # (a branch always has two parents (or none) by definition)
1165 unknown = remote.branches(unknown)
1165 unknown = remote.branches(unknown)
1166 while unknown:
1166 while unknown:
1167 r = []
1167 r = []
1168 while unknown:
1168 while unknown:
1169 n = unknown.pop(0)
1169 n = unknown.pop(0)
1170 if n[0] in seen:
1170 if n[0] in seen:
1171 continue
1171 continue
1172
1172
1173 self.ui.debug(_("examining %s:%s\n")
1173 self.ui.debug(_("examining %s:%s\n")
1174 % (short(n[0]), short(n[1])))
1174 % (short(n[0]), short(n[1])))
1175 if n[0] == nullid: # found the end of the branch
1175 if n[0] == nullid: # found the end of the branch
1176 pass
1176 pass
1177 elif n in seenbranch:
1177 elif n in seenbranch:
1178 self.ui.debug(_("branch already found\n"))
1178 self.ui.debug(_("branch already found\n"))
1179 continue
1179 continue
1180 elif n[1] and n[1] in m: # do we know the base?
1180 elif n[1] and n[1] in m: # do we know the base?
1181 self.ui.debug(_("found incomplete branch %s:%s\n")
1181 self.ui.debug(_("found incomplete branch %s:%s\n")
1182 % (short(n[0]), short(n[1])))
1182 % (short(n[0]), short(n[1])))
1183 search.append(n) # schedule branch range for scanning
1183 search.append(n) # schedule branch range for scanning
1184 seenbranch[n] = 1
1184 seenbranch[n] = 1
1185 else:
1185 else:
1186 if n[1] not in seen and n[1] not in fetch:
1186 if n[1] not in seen and n[1] not in fetch:
1187 if n[2] in m and n[3] in m:
1187 if n[2] in m and n[3] in m:
1188 self.ui.debug(_("found new changeset %s\n") %
1188 self.ui.debug(_("found new changeset %s\n") %
1189 short(n[1]))
1189 short(n[1]))
1190 fetch[n[1]] = 1 # earliest unknown
1190 fetch[n[1]] = 1 # earliest unknown
1191 for p in n[2:4]:
1191 for p in n[2:4]:
1192 if p in m:
1192 if p in m:
1193 base[p] = 1 # latest known
1193 base[p] = 1 # latest known
1194
1194
1195 for p in n[2:4]:
1195 for p in n[2:4]:
1196 if p not in req and p not in m:
1196 if p not in req and p not in m:
1197 r.append(p)
1197 r.append(p)
1198 req[p] = 1
1198 req[p] = 1
1199 seen[n[0]] = 1
1199 seen[n[0]] = 1
1200
1200
1201 if r:
1201 if r:
1202 reqcnt += 1
1202 reqcnt += 1
1203 self.ui.debug(_("request %d: %s\n") %
1203 self.ui.debug(_("request %d: %s\n") %
1204 (reqcnt, " ".join(map(short, r))))
1204 (reqcnt, " ".join(map(short, r))))
1205 for p in xrange(0, len(r), 10):
1205 for p in xrange(0, len(r), 10):
1206 for b in remote.branches(r[p:p+10]):
1206 for b in remote.branches(r[p:p+10]):
1207 self.ui.debug(_("received %s:%s\n") %
1207 self.ui.debug(_("received %s:%s\n") %
1208 (short(b[0]), short(b[1])))
1208 (short(b[0]), short(b[1])))
1209 unknown.append(b)
1209 unknown.append(b)
1210
1210
1211 # do binary search on the branches we found
1211 # do binary search on the branches we found
1212 while search:
1212 while search:
1213 n = search.pop(0)
1213 n = search.pop(0)
1214 reqcnt += 1
1214 reqcnt += 1
1215 l = remote.between([(n[0], n[1])])[0]
1215 l = remote.between([(n[0], n[1])])[0]
1216 l.append(n[1])
1216 l.append(n[1])
1217 p = n[0]
1217 p = n[0]
1218 f = 1
1218 f = 1
1219 for i in l:
1219 for i in l:
1220 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1220 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1221 if i in m:
1221 if i in m:
1222 if f <= 2:
1222 if f <= 2:
1223 self.ui.debug(_("found new branch changeset %s\n") %
1223 self.ui.debug(_("found new branch changeset %s\n") %
1224 short(p))
1224 short(p))
1225 fetch[p] = 1
1225 fetch[p] = 1
1226 base[i] = 1
1226 base[i] = 1
1227 else:
1227 else:
1228 self.ui.debug(_("narrowed branch search to %s:%s\n")
1228 self.ui.debug(_("narrowed branch search to %s:%s\n")
1229 % (short(p), short(i)))
1229 % (short(p), short(i)))
1230 search.append((p, i))
1230 search.append((p, i))
1231 break
1231 break
1232 p, f = i, f * 2
1232 p, f = i, f * 2
1233
1233
1234 # sanity check our fetch list
1234 # sanity check our fetch list
1235 for f in fetch.keys():
1235 for f in fetch.keys():
1236 if f in m:
1236 if f in m:
1237 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1237 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1238
1238
1239 if base.keys() == [nullid]:
1239 if base.keys() == [nullid]:
1240 if force:
1240 if force:
1241 self.ui.warn(_("warning: repository is unrelated\n"))
1241 self.ui.warn(_("warning: repository is unrelated\n"))
1242 else:
1242 else:
1243 raise util.Abort(_("repository is unrelated"))
1243 raise util.Abort(_("repository is unrelated"))
1244
1244
1245 self.ui.debug(_("found new changesets starting at ") +
1245 self.ui.debug(_("found new changesets starting at ") +
1246 " ".join([short(f) for f in fetch]) + "\n")
1246 " ".join([short(f) for f in fetch]) + "\n")
1247
1247
1248 self.ui.debug(_("%d total queries\n") % reqcnt)
1248 self.ui.debug(_("%d total queries\n") % reqcnt)
1249
1249
1250 return fetch.keys()
1250 return fetch.keys()
1251
1251
1252 def findoutgoing(self, remote, base=None, heads=None, force=False):
1252 def findoutgoing(self, remote, base=None, heads=None, force=False):
1253 """Return list of nodes that are roots of subsets not in remote
1253 """Return list of nodes that are roots of subsets not in remote
1254
1254
1255 If base dict is specified, assume that these nodes and their parents
1255 If base dict is specified, assume that these nodes and their parents
1256 exist on the remote side.
1256 exist on the remote side.
1257 If a list of heads is specified, return only nodes which are heads
1257 If a list of heads is specified, return only nodes which are heads
1258 or ancestors of these heads, and return a second element which
1258 or ancestors of these heads, and return a second element which
1259 contains all remote heads which get new children.
1259 contains all remote heads which get new children.
1260 """
1260 """
1261 if base == None:
1261 if base == None:
1262 base = {}
1262 base = {}
1263 self.findincoming(remote, base, heads, force=force)
1263 self.findincoming(remote, base, heads, force=force)
1264
1264
1265 self.ui.debug(_("common changesets up to ")
1265 self.ui.debug(_("common changesets up to ")
1266 + " ".join(map(short, base.keys())) + "\n")
1266 + " ".join(map(short, base.keys())) + "\n")
1267
1267
1268 remain = dict.fromkeys(self.changelog.nodemap)
1268 remain = dict.fromkeys(self.changelog.nodemap)
1269
1269
1270 # prune everything remote has from the tree
1270 # prune everything remote has from the tree
1271 del remain[nullid]
1271 del remain[nullid]
1272 remove = base.keys()
1272 remove = base.keys()
1273 while remove:
1273 while remove:
1274 n = remove.pop(0)
1274 n = remove.pop(0)
1275 if n in remain:
1275 if n in remain:
1276 del remain[n]
1276 del remain[n]
1277 for p in self.changelog.parents(n):
1277 for p in self.changelog.parents(n):
1278 remove.append(p)
1278 remove.append(p)
1279
1279
1280 # find every node whose parents have been pruned
1280 # find every node whose parents have been pruned
1281 subset = []
1281 subset = []
1282 # find every remote head that will get new children
1282 # find every remote head that will get new children
1283 updated_heads = {}
1283 updated_heads = {}
1284 for n in remain:
1284 for n in remain:
1285 p1, p2 = self.changelog.parents(n)
1285 p1, p2 = self.changelog.parents(n)
1286 if p1 not in remain and p2 not in remain:
1286 if p1 not in remain and p2 not in remain:
1287 subset.append(n)
1287 subset.append(n)
1288 if heads:
1288 if heads:
1289 if p1 in heads:
1289 if p1 in heads:
1290 updated_heads[p1] = True
1290 updated_heads[p1] = True
1291 if p2 in heads:
1291 if p2 in heads:
1292 updated_heads[p2] = True
1292 updated_heads[p2] = True
1293
1293
1294 # this is the set of all roots we have to push
1294 # this is the set of all roots we have to push
1295 if heads:
1295 if heads:
1296 return subset, updated_heads.keys()
1296 return subset, updated_heads.keys()
1297 else:
1297 else:
1298 return subset
1298 return subset
1299
1299
    def pull(self, remote, heads=None, force=False, lock=None):
        """Pull changes from the remote repository into this one.

        remote - the peer repository to pull from
        heads  - optional list of remote heads to limit the pull to
        force  - pull even if the repositories appear unrelated
        lock   - optional pre-acquired repository lock

        Returns the result of addchangegroup() (0 meaning no changes).
        """
        mylock = False
        if not lock:
            lock = self.lock()
            mylock = True

        try:
            fetch = self.findincoming(remote, force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                # a partial pull needs server-side changegroupsubset support
                if 'changegroupsubset' not in remote.capabilities:
                    raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            # only release the lock if we acquired it ourselves
            if mylock:
                lock.release()
1325
1325
    def push(self, remote, force=False, revs=None):
        """Push local changes to the remote repository.

        Dispatches to one of the two push strategies depending on the
        remote's advertised capabilities; returns that method's result.
        """
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)
1338
1338
    def prepush(self, remote, force, revs):
        """Analyse what a push would send and build its changegroup.

        Returns (changegroup, remote_heads) when there is something to
        push, or (None, 1) when there is nothing to push or when the
        push would create new remote heads and 'force' is not set.
        """
        base = {}
        remote_heads = remote.heads()
        # side effect: populates 'base' with nodes common to both sides
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                # remote repo is empty: any push is head-safe
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                # pushing everything and we have more heads than remote
                warn = 1
            else:
                # simulate the post-push remote head set and compare sizes
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            # no outgoing head descends from r, so r
                            # remains a head after the push
                            newheads.append(r)
                    else:
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1
            elif inc:
                self.ui.warn(_("note: unsynced remote changes!\n"))


        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
1394
1394
1395 def push_addchangegroup(self, remote, force, revs):
1395 def push_addchangegroup(self, remote, force, revs):
1396 lock = remote.lock()
1396 lock = remote.lock()
1397
1397
1398 ret = self.prepush(remote, force, revs)
1398 ret = self.prepush(remote, force, revs)
1399 if ret[0] is not None:
1399 if ret[0] is not None:
1400 cg, remote_heads = ret
1400 cg, remote_heads = ret
1401 return remote.addchangegroup(cg, 'push', self.url())
1401 return remote.addchangegroup(cg, 'push', self.url())
1402 return ret[1]
1402 return ret[1]
1403
1403
    def push_unbundle(self, remote, force, revs):
        """Push by sending a bundle for the remote to unbundle itself
        (new ssh servers, http servers - no remote lock needed here)."""
        # local repo finds heads on server, finds out what revs it
        # must push. once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            # 'force' tells the server to skip its own heads check
            if force: remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]
1416
1416
1417 def changegroupinfo(self, nodes):
1417 def changegroupinfo(self, nodes):
1418 self.ui.note(_("%d changesets found\n") % len(nodes))
1418 self.ui.note(_("%d changesets found\n") % len(nodes))
1419 if self.ui.debugflag:
1419 if self.ui.debugflag:
1420 self.ui.debug(_("List of changesets:\n"))
1420 self.ui.debug(_("List of changesets:\n"))
1421 for node in nodes:
1421 for node in nodes:
1422 self.ui.debug("%s\n" % hex(node))
1422 self.ui.debug("%s\n" % hex(node))
1423
1423
1424 def changegroupsubset(self, bases, heads, source):
1424 def changegroupsubset(self, bases, heads, source):
1425 """This function generates a changegroup consisting of all the nodes
1425 """This function generates a changegroup consisting of all the nodes
1426 that are descendents of any of the bases, and ancestors of any of
1426 that are descendents of any of the bases, and ancestors of any of
1427 the heads.
1427 the heads.
1428
1428
1429 It is fairly complex as determining which filenodes and which
1429 It is fairly complex as determining which filenodes and which
1430 manifest nodes need to be included for the changeset to be complete
1430 manifest nodes need to be included for the changeset to be complete
1431 is non-trivial.
1431 is non-trivial.
1432
1432
1433 Another wrinkle is doing the reverse, figuring out which changeset in
1433 Another wrinkle is doing the reverse, figuring out which changeset in
1434 the changegroup a particular filenode or manifestnode belongs to."""
1434 the changegroup a particular filenode or manifestnode belongs to."""
1435
1435
1436 self.hook('preoutgoing', throw=True, source=source)
1436 self.hook('preoutgoing', throw=True, source=source)
1437
1437
1438 # Set up some initial variables
1438 # Set up some initial variables
1439 # Make it easy to refer to self.changelog
1439 # Make it easy to refer to self.changelog
1440 cl = self.changelog
1440 cl = self.changelog
1441 # msng is short for missing - compute the list of changesets in this
1441 # msng is short for missing - compute the list of changesets in this
1442 # changegroup.
1442 # changegroup.
1443 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1443 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1444 self.changegroupinfo(msng_cl_lst)
1444 self.changegroupinfo(msng_cl_lst)
1445 # Some bases may turn out to be superfluous, and some heads may be
1445 # Some bases may turn out to be superfluous, and some heads may be
1446 # too. nodesbetween will return the minimal set of bases and heads
1446 # too. nodesbetween will return the minimal set of bases and heads
1447 # necessary to re-create the changegroup.
1447 # necessary to re-create the changegroup.
1448
1448
1449 # Known heads are the list of heads that it is assumed the recipient
1449 # Known heads are the list of heads that it is assumed the recipient
1450 # of this changegroup will know about.
1450 # of this changegroup will know about.
1451 knownheads = {}
1451 knownheads = {}
1452 # We assume that all parents of bases are known heads.
1452 # We assume that all parents of bases are known heads.
1453 for n in bases:
1453 for n in bases:
1454 for p in cl.parents(n):
1454 for p in cl.parents(n):
1455 if p != nullid:
1455 if p != nullid:
1456 knownheads[p] = 1
1456 knownheads[p] = 1
1457 knownheads = knownheads.keys()
1457 knownheads = knownheads.keys()
1458 if knownheads:
1458 if knownheads:
1459 # Now that we know what heads are known, we can compute which
1459 # Now that we know what heads are known, we can compute which
1460 # changesets are known. The recipient must know about all
1460 # changesets are known. The recipient must know about all
1461 # changesets required to reach the known heads from the null
1461 # changesets required to reach the known heads from the null
1462 # changeset.
1462 # changeset.
1463 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1463 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1464 junk = None
1464 junk = None
1465 # Transform the list into an ersatz set.
1465 # Transform the list into an ersatz set.
1466 has_cl_set = dict.fromkeys(has_cl_set)
1466 has_cl_set = dict.fromkeys(has_cl_set)
1467 else:
1467 else:
1468 # If there were no known heads, the recipient cannot be assumed to
1468 # If there were no known heads, the recipient cannot be assumed to
1469 # know about any changesets.
1469 # know about any changesets.
1470 has_cl_set = {}
1470 has_cl_set = {}
1471
1471
1472 # Make it easy to refer to self.manifest
1472 # Make it easy to refer to self.manifest
1473 mnfst = self.manifest
1473 mnfst = self.manifest
1474 # We don't know which manifests are missing yet
1474 # We don't know which manifests are missing yet
1475 msng_mnfst_set = {}
1475 msng_mnfst_set = {}
1476 # Nor do we know which filenodes are missing.
1476 # Nor do we know which filenodes are missing.
1477 msng_filenode_set = {}
1477 msng_filenode_set = {}
1478
1478
1479 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1479 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1480 junk = None
1480 junk = None
1481
1481
1482 # A changeset always belongs to itself, so the changenode lookup
1482 # A changeset always belongs to itself, so the changenode lookup
1483 # function for a changenode is identity.
1483 # function for a changenode is identity.
        def identity(x):
            # The changenode that 'owns' a changeset is the changeset
            # itself, so changelog lookups use the identity function.
            return x
1486
1486
1487 # A function generating function. Sets up an environment for the
1487 # A function generating function. Sets up an environment for the
1488 # inner function.
1488 # inner function.
        def cmp_by_rev_func(revlog):
            # Closure factory: build a comparator ordering nodes by their
            # revision number in *revlog*.  Revision order is both the most
            # efficient order to read the nodes in and a topological
            # sorting of the nodes, so this comparator is used for every
            # node list we hand to a group generator.
            def cmp_by_rev(a, b):
                # Python 2 three-way comparator for list.sort().
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev
1497
1497
1498 # If we determine that a particular file or manifest node must be a
1498 # If we determine that a particular file or manifest node must be a
1499 # node that the recipient of the changegroup will already have, we can
1499 # node that the recipient of the changegroup will already have, we can
1500 # also assume the recipient will have all the parents. This function
1500 # also assume the recipient will have all the parents. This function
1501 # prunes them from the set of missing nodes.
1501 # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            # If the recipient has a node, it has every ancestor of that
            # node too.  Walk the full ancestry of *hasset* (a dict used as
            # a set) in *revlog* and remove all of it from the missing-node
            # set *msngset*.  Both dicts are mutated in place.
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                # Explicit worklist walk of the ancestry; nullid parents
                # are filtered out since the null revision is universal.
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)
1515
1515
1516 # This is a function generating function used to set up an environment
1516 # This is a function generating function used to set up an environment
1517 # for the inner function to execute in.
1517 # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # Closure factory for the changelog group generator's
            # collection callback.  *changedfileset* accumulates every
            # filename touched by any outgoing changeset.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # setdefault(f, f) keeps a single shared string object
                    # per distinct filename, saving memory on repeats.
                    changedfileset.setdefault(f, f)
                # Remember the first changenode seen referencing each
                # manifest (c[0]); that changenode is later treated as the
                # manifest's owner.
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files
1537
1537
1538 # Figure out which manifest nodes (of the ones we think might be part
1538 # Figure out which manifest nodes (of the ones we think might be part
1539 # of the changegroup) the recipient must know about and remove them
1539 # of the changegroup) the recipient must know about and remove them
1540 # from the changegroup.
1540 # from the changegroup.
        def prune_manifests():
            # Drop from msng_mnfst_set every manifest the recipient must
            # already have, then (via prune_parents) the ancestors of
            # those manifests as well.
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1551
1551
1552 # Use the information collected in collect_manifests_and_files to say
1552 # Use the information collected in collect_manifests_and_files to say
1553 # which changenode any manifestnode belongs to.
1553 # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            # Map a manifest node to the changenode recorded as its owner
            # by collect_manifests_and_files above.
            return msng_mnfst_set[mnfstnode]
1556
1556
1557 # A function generating function that sets up the initial environment
1557 # A function generating function that sets up the initial environment
1558 # the inner function.
1558 # the inner function.
        def filenode_collector(changedfiles):
            # Closure factory for the manifest group generator's collection
            # callback: record which filenodes each outgoing manifest
            # references (so they can be sent too) and which changenode
            # each filenode is first seen under.
            next_rev = [0]  # one-element list so the closure can rebind it
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to. It
            # does this by assuming a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff (cheap) instead of reading
                    # the full manifest.
                    delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                    # Manifest lines are "<filename>\0<hex filenode>...".
                    for dline in delta.splitlines():
                        # get the filename and filenode for that line
                        f, fnode = dline.split('\0')
                        fnode = bin(fnode[:40])
                        # changedfiles maps name -> shared name string;
                        # None means the file is not one we care about.
                        f = changedfiles.get(f, None)
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes
1606
1606
1607 # We have a list of filenodes we think we need for a file, lets remove
1607 # We have a list of filenodes we think we need for a file, lets remove
1608 # all those we now the recipient must have.
1608 # all those we now the recipient must have.
        def prune_filenodes(f, filerevlog):
            # Drop from file *f*'s missing-filenode set every node whose
            # linked changeset the recipient already has, plus (via
            # prune_parents) all ancestors of those nodes.
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)
1620
1620
1621 # A function generator function that sets up the a context for the
1621 # A function generator function that sets up the a context for the
1622 # inner function.
1622 # inner function.
        def lookup_filenode_link_func(fname):
            # Closure factory: build the changenode-lookup callback for
            # file *fname*'s revlog group.
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link
1629
1629
1630 # Now that we have all theses utility functions to help out and
1630 # Now that we have all theses utility functions to help out and
1631 # logically divide up the task, generate the group.
1631 # logically divide up the task, generate the group.
        def gengroup():
            # Generator producing the raw changegroup stream, in wire
            # order: changelog chunks, manifest chunks, then per-file
            # chunks (each prefixed by a filename chunk), then a closing
            # chunk.  The statement order here is load-bearing: the
            # changelog pass populates changedfiles/msng_mnfst_set, and
            # the manifest pass populates msng_filenode_set.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory
            # for them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.genchunk(fname)
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()
1690
1690
1691 if msng_cl_lst:
1691 if msng_cl_lst:
1692 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1692 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1693
1693
1694 return util.chunkbuffer(gengroup())
1694 return util.chunkbuffer(gengroup())
1695
1695
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        basenodes -- nodes the recipient is assumed to already have; the
                     group contains everything in self.changelog that is
                     not reachable from them.
        source    -- tag passed through to the 'preoutgoing'/'outgoing'
                     hooks.

        Returns a util.chunkbuffer wrapping a chunk generator.

        This is much easier than the previous function as we can assume
        that the recipient has any changenode we aren't sending them."""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.nodesbetween(basenodes, None)[0]
        # Revision numbers of all outgoing changesets, as a dict-set for
        # O(1) membership tests in gennodelst.
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes)

        def identity(x):
            # A changeset 'owns' itself, so the changelog lookup function
            # is the identity.
            return x

        def gennodelst(revlog):
            # Yield the nodes of *revlog* whose linked changeset is among
            # the outgoing revisions.
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            # Closure factory: record into *changedfileset* every file
            # touched by an outgoing changeset.
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            # Closure factory: map a node of *revlog* to the changenode
            # it is linked to.
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # Emit chunks in wire order: changelog, manifests, then the
            # per-file groups, then the closing chunk.
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                # Materialize so we can test emptiness before emitting the
                # filename chunk.
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1762
1762
    def addchangegroup(self, source, srctype, url):
        """add changegroup to repo.

        source  -- stream of changegroup chunks
        srctype -- origin tag (e.g. 'push'/'pull') passed to the hooks
        url     -- source URL, also passed to the hooks

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - less heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            # New changesets are appended, so the incoming one will get
            # revision number cl.count().  (cl is bound later; this is a
            # closure over the local below.)
            return cl.count()

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        tr = self.transaction()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = None
        try:
            cl = appendfile.appendchangelog(self.sopener,
                                            self.changelog.version)

            oldheads = len(cl.heads())

            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            cor = cl.count() - 1   # rev of the old tip
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, tr, 1) is None:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1   # rev of the new tip
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, tr)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    # empty chunk marks the end of the file groups
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, tr) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            cl.writedata()
        finally:
            # Always discard the appendfile scratch state, written or not.
            if cl:
                cl.cleanup()

        # make changelog see real files again
        self.changelog = changelog.changelog(self.sopener,
                                             self.changelog.version)
        self.changelog.checkinlinesize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            # pretxnchangegroup may abort (throw=True) and roll back the
            # transaction, since tr.close() has not run yet.
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(cor+1)), source=srctype,
                      url=url)

        tr.close()

        if changesets > 0:
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            # One 'incoming' hook call per newly added changeset.
            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
1869
1869
1870
1870
1871 def stream_in(self, remote):
1871 def stream_in(self, remote):
1872 fp = remote.stream_out()
1872 fp = remote.stream_out()
1873 l = fp.readline()
1873 l = fp.readline()
1874 try:
1874 try:
1875 resp = int(l)
1875 resp = int(l)
1876 except ValueError:
1876 except ValueError:
1877 raise util.UnexpectedOutput(
1877 raise util.UnexpectedOutput(
1878 _('Unexpected response from remote server:'), l)
1878 _('Unexpected response from remote server:'), l)
1879 if resp == 1:
1879 if resp == 1:
1880 raise util.Abort(_('operation forbidden by server'))
1880 raise util.Abort(_('operation forbidden by server'))
1881 elif resp == 2:
1881 elif resp == 2:
1882 raise util.Abort(_('locking the remote repository failed'))
1882 raise util.Abort(_('locking the remote repository failed'))
1883 elif resp != 0:
1883 elif resp != 0:
1884 raise util.Abort(_('the server sent an unknown error code'))
1884 raise util.Abort(_('the server sent an unknown error code'))
1885 self.ui.status(_('streaming all changes\n'))
1885 self.ui.status(_('streaming all changes\n'))
1886 l = fp.readline()
1886 l = fp.readline()
1887 try:
1887 try:
1888 total_files, total_bytes = map(int, l.split(' ', 1))
1888 total_files, total_bytes = map(int, l.split(' ', 1))
1889 except ValueError, TypeError:
1889 except ValueError, TypeError:
1890 raise util.UnexpectedOutput(
1890 raise util.UnexpectedOutput(
1891 _('Unexpected response from remote server:'), l)
1891 _('Unexpected response from remote server:'), l)
1892 self.ui.status(_('%d files to transfer, %s of data\n') %
1892 self.ui.status(_('%d files to transfer, %s of data\n') %
1893 (total_files, util.bytecount(total_bytes)))
1893 (total_files, util.bytecount(total_bytes)))
1894 start = time.time()
1894 start = time.time()
1895 for i in xrange(total_files):
1895 for i in xrange(total_files):
1896 # XXX doesn't support '\n' or '\r' in filenames
1896 # XXX doesn't support '\n' or '\r' in filenames
1897 l = fp.readline()
1897 l = fp.readline()
1898 try:
1898 try:
1899 name, size = l.split('\0', 1)
1899 name, size = l.split('\0', 1)
1900 size = int(size)
1900 size = int(size)
1901 except ValueError, TypeError:
1901 except ValueError, TypeError:
1902 raise util.UnexpectedOutput(
1902 raise util.UnexpectedOutput(
1903 _('Unexpected response from remote server:'), l)
1903 _('Unexpected response from remote server:'), l)
1904 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1904 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1905 ofp = self.sopener(name, 'w')
1905 ofp = self.sopener(name, 'w')
1906 for chunk in util.filechunkiter(fp, limit=size):
1906 for chunk in util.filechunkiter(fp, limit=size):
1907 ofp.write(chunk)
1907 ofp.write(chunk)
1908 ofp.close()
1908 ofp.close()
1909 elapsed = time.time() - start
1909 elapsed = time.time() - start
1910 if elapsed <= 0:
1910 if elapsed <= 0:
1911 elapsed = 0.001
1911 elapsed = 0.001
1912 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1912 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1913 (util.bytecount(total_bytes), elapsed,
1913 (util.bytecount(total_bytes), elapsed,
1914 util.bytecount(total_bytes / elapsed)))
1914 util.bytecount(total_bytes / elapsed)))
1915 self.reload()
1915 self.reload()
1916 return len(self.heads()) + 1
1916 return len(self.heads()) + 1
1917
1917
1918 def clone(self, remote, heads=[], stream=False):
1918 def clone(self, remote, heads=[], stream=False):
1919 '''clone remote repository.
1919 '''clone remote repository.
1920
1920
1921 keyword arguments:
1921 keyword arguments:
1922 heads: list of revs to clone (forces use of pull)
1922 heads: list of revs to clone (forces use of pull)
1923 stream: use streaming clone if possible'''
1923 stream: use streaming clone if possible'''
1924
1924
1925 # now, all clients that can request uncompressed clones can
1925 # now, all clients that can request uncompressed clones can
1926 # read repo formats supported by all servers that can serve
1926 # read repo formats supported by all servers that can serve
1927 # them.
1927 # them.
1928
1928
1929 # if revlog format changes, client will have to check version
1929 # if revlog format changes, client will have to check version
1930 # and format flags on "stream" capability, and use
1930 # and format flags on "stream" capability, and use
1931 # uncompressed only if compatible.
1931 # uncompressed only if compatible.
1932
1932
1933 if stream and not heads and remote.capable('stream'):
1933 if stream and not heads and remote.capable('stream'):
1934 return self.stream_in(remote)
1934 return self.stream_in(remote)
1935 return self.pull(remote, heads)
1935 return self.pull(remote, heads)
1936
1936
1937 # used to avoid circular references so destructors work
1937 # used to avoid circular references so destructors work
def aftertrans(files):
    """Build a deferred-rename callback for transaction cleanup.

    *files* is an iterable of (src, dest) pairs; each pair is copied
    into a fresh tuple up front so the returned closure holds no
    reference to the caller's list entries (this module uses the
    pattern to avoid circular references so destructors keep working).
    The returned zero-argument callable performs all the renames via
    util.rename when invoked.
    """
    pending = [tuple(pair) for pair in files]

    def run_renames():
        for src_path, dst_path in pending:
            util.rename(src_path, dst_path)

    return run_renames
1944
1944
def instance(ui, path, create):
    """Instantiate a localrepository at *path*.

    Any leading 'file:' scheme is stripped from *path* before the
    repository object is constructed; *create* is forwarded unchanged.
    """
    local_path = util.drop_scheme('file', path)
    return localrepository(ui, local_path, create)
1947
1947
def islocal(path):
    """Report whether *path* is local -- repositories served by this
    module always are, so this unconditionally returns True."""
    return True
@@ -1,501 +1,501 b''
1 # merge.py - directory-level update/merge handling for Mercurial
1 # merge.py - directory-level update/merge handling for Mercurial
2 #
2 #
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import _
9 from i18n import _
10 import errno, util, os, tempfile
10 import errno, util, os, tempfile
11
11
12 def filemerge(repo, fw, fo, wctx, mctx):
12 def filemerge(repo, fw, fo, wctx, mctx):
13 """perform a 3-way merge in the working directory
13 """perform a 3-way merge in the working directory
14
14
15 fw = filename in the working directory
15 fw = filename in the working directory
16 fo = filename in other parent
16 fo = filename in other parent
17 wctx, mctx = working and merge changecontexts
17 wctx, mctx = working and merge changecontexts
18 """
18 """
19
19
20 def temp(prefix, ctx):
20 def temp(prefix, ctx):
21 pre = "%s~%s." % (os.path.basename(ctx.path()), prefix)
21 pre = "%s~%s." % (os.path.basename(ctx.path()), prefix)
22 (fd, name) = tempfile.mkstemp(prefix=pre)
22 (fd, name) = tempfile.mkstemp(prefix=pre)
23 data = repo.wwritedata(ctx.path(), ctx.data())
23 data = repo.wwritedata(ctx.path(), ctx.data())
24 f = os.fdopen(fd, "wb")
24 f = os.fdopen(fd, "wb")
25 f.write(data)
25 f.write(data)
26 f.close()
26 f.close()
27 return name
27 return name
28
28
29 fcm = wctx.filectx(fw)
29 fcm = wctx.filectx(fw)
30 fco = mctx.filectx(fo)
30 fco = mctx.filectx(fo)
31
31
32 if not fco.cmp(fcm.data()): # files identical?
32 if not fco.cmp(fcm.data()): # files identical?
33 return None
33 return None
34
34
35 fca = fcm.ancestor(fco)
35 fca = fcm.ancestor(fco)
36 if not fca:
36 if not fca:
37 fca = repo.filectx(fw, fileid=nullrev)
37 fca = repo.filectx(fw, fileid=nullrev)
38 a = repo.wjoin(fw)
38 a = repo.wjoin(fw)
39 b = temp("base", fca)
39 b = temp("base", fca)
40 c = temp("other", fco)
40 c = temp("other", fco)
41
41
42 if fw != fo:
42 if fw != fo:
43 repo.ui.status(_("merging %s and %s\n") % (fw, fo))
43 repo.ui.status(_("merging %s and %s\n") % (fw, fo))
44 else:
44 else:
45 repo.ui.status(_("merging %s\n") % fw)
45 repo.ui.status(_("merging %s\n") % fw)
46
46
47 repo.ui.debug(_("my %s other %s ancestor %s\n") % (fcm, fco, fca))
47 repo.ui.debug(_("my %s other %s ancestor %s\n") % (fcm, fco, fca))
48
48
49 cmd = (os.environ.get("HGMERGE") or repo.ui.config("ui", "merge")
49 cmd = (os.environ.get("HGMERGE") or repo.ui.config("ui", "merge")
50 or "hgmerge")
50 or "hgmerge")
51 r = util.system('%s "%s" "%s" "%s"' % (cmd, a, b, c), cwd=repo.root,
51 r = util.system('%s "%s" "%s" "%s"' % (cmd, a, b, c), cwd=repo.root,
52 environ={'HG_FILE': fw,
52 environ={'HG_FILE': fw,
53 'HG_MY_NODE': str(wctx.parents()[0]),
53 'HG_MY_NODE': str(wctx.parents()[0]),
54 'HG_OTHER_NODE': str(mctx)})
54 'HG_OTHER_NODE': str(mctx)})
55 if r:
55 if r:
56 repo.ui.warn(_("merging %s failed!\n") % fw)
56 repo.ui.warn(_("merging %s failed!\n") % fw)
57
57
58 os.unlink(b)
58 os.unlink(b)
59 os.unlink(c)
59 os.unlink(c)
60 return r
60 return r
61
61
62 def checkunknown(wctx, mctx):
62 def checkunknown(wctx, mctx):
63 "check for collisions between unknown files and files in mctx"
63 "check for collisions between unknown files and files in mctx"
64 man = mctx.manifest()
64 man = mctx.manifest()
65 for f in wctx.unknown():
65 for f in wctx.unknown():
66 if f in man:
66 if f in man:
67 if mctx.filectx(f).cmp(wctx.filectx(f).data()):
67 if mctx.filectx(f).cmp(wctx.filectx(f).data()):
68 raise util.Abort(_("untracked local file '%s' differs"\
68 raise util.Abort(_("untracked local file '%s' differs"\
69 " from remote version") % f)
69 " from remote version") % f)
70
70
71 def checkcollision(mctx):
71 def checkcollision(mctx):
72 "check for case folding collisions in the destination context"
72 "check for case folding collisions in the destination context"
73 folded = {}
73 folded = {}
74 for fn in mctx.manifest():
74 for fn in mctx.manifest():
75 fold = fn.lower()
75 fold = fn.lower()
76 if fold in folded:
76 if fold in folded:
77 raise util.Abort(_("case-folding collision between %s and %s")
77 raise util.Abort(_("case-folding collision between %s and %s")
78 % (fn, folded[fold]))
78 % (fn, folded[fold]))
79 folded[fold] = fn
79 folded[fold] = fn
80
80
81 def forgetremoved(wctx, mctx):
81 def forgetremoved(wctx, mctx):
82 """
82 """
83 Forget removed files
83 Forget removed files
84
84
85 If we're jumping between revisions (as opposed to merging), and if
85 If we're jumping between revisions (as opposed to merging), and if
86 neither the working directory nor the target rev has the file,
86 neither the working directory nor the target rev has the file,
87 then we need to remove it from the dirstate, to prevent the
87 then we need to remove it from the dirstate, to prevent the
88 dirstate from listing the file when it is no longer in the
88 dirstate from listing the file when it is no longer in the
89 manifest.
89 manifest.
90 """
90 """
91
91
92 action = []
92 action = []
93 man = mctx.manifest()
93 man = mctx.manifest()
94 for f in wctx.deleted() + wctx.removed():
94 for f in wctx.deleted() + wctx.removed():
95 if f not in man:
95 if f not in man:
96 action.append((f, "f"))
96 action.append((f, "f"))
97
97
98 return action
98 return action
99
99
100 def findcopies(repo, m1, m2, ma, limit):
100 def findcopies(repo, m1, m2, ma, limit):
101 """
101 """
102 Find moves and copies between m1 and m2 back to limit linkrev
102 Find moves and copies between m1 and m2 back to limit linkrev
103 """
103 """
104
104
105 def findold(fctx):
105 def findold(fctx):
106 "find files that path was copied from, back to linkrev limit"
106 "find files that path was copied from, back to linkrev limit"
107 old = {}
107 old = {}
108 orig = fctx.path()
108 orig = fctx.path()
109 visit = [fctx]
109 visit = [fctx]
110 while visit:
110 while visit:
111 fc = visit.pop()
111 fc = visit.pop()
112 if fc.path() != orig and fc.path() not in old:
112 if fc.path() != orig and fc.path() not in old:
113 old[fc.path()] = 1
113 old[fc.path()] = 1
114 if fc.rev() < limit:
114 if fc.rev() < limit:
115 continue
115 continue
116 visit += fc.parents()
116 visit += fc.parents()
117
117
118 old = old.keys()
118 old = old.keys()
119 old.sort()
119 old.sort()
120 return old
120 return old
121
121
122 def nonoverlap(d1, d2, d3):
122 def nonoverlap(d1, d2, d3):
123 "Return list of elements in d1 not in d2 or d3"
123 "Return list of elements in d1 not in d2 or d3"
124 l = [d for d in d1 if d not in d3 and d not in d2]
124 l = [d for d in d1 if d not in d3 and d not in d2]
125 l.sort()
125 l.sort()
126 return l
126 return l
127
127
128 def checkcopies(c, man):
128 def checkcopies(c, man):
129 '''check possible copies for filectx c'''
129 '''check possible copies for filectx c'''
130 for of in findold(c):
130 for of in findold(c):
131 if of not in man:
131 if of not in man:
132 return
132 return
133 c2 = ctx(of, man[of])
133 c2 = ctx(of, man[of])
134 ca = c.ancestor(c2)
134 ca = c.ancestor(c2)
135 if not ca: # unrelated
135 if not ca: # unrelated
136 return
136 return
137 if ca.path() == c.path() or ca.path() == c2.path():
137 if ca.path() == c.path() or ca.path() == c2.path():
138 fullcopy[c.path()] = of
138 fullcopy[c.path()] = of
139 if c == ca or c2 == ca: # no merge needed, ignore copy
139 if c == ca or c2 == ca: # no merge needed, ignore copy
140 return
140 return
141 copy[c.path()] = of
141 copy[c.path()] = of
142
142
143 def dirs(files):
143 def dirs(files):
144 d = {}
144 d = {}
145 for f in files:
145 for f in files:
146 d[os.path.dirname(f)] = True
146 d[os.path.dirname(f)] = True
147 return d
147 return d
148
148
149 if not repo.ui.configbool("merge", "followcopies", True):
149 if not repo.ui.configbool("merge", "followcopies", True):
150 return {}
150 return {}
151
151
152 # avoid silly behavior for update from empty dir
152 # avoid silly behavior for update from empty dir
153 if not m1 or not m2 or not ma:
153 if not m1 or not m2 or not ma:
154 return {}
154 return {}
155
155
156 dcopies = repo.dirstate.copies()
156 dcopies = repo.dirstate.copies()
157 copy = {}
157 copy = {}
158 fullcopy = {}
158 fullcopy = {}
159 u1 = nonoverlap(m1, m2, ma)
159 u1 = nonoverlap(m1, m2, ma)
160 u2 = nonoverlap(m2, m1, ma)
160 u2 = nonoverlap(m2, m1, ma)
161 ctx = util.cachefunc(lambda f, n: repo.filectx(f, fileid=n[:20]))
161 ctx = util.cachefunc(lambda f, n: repo.filectx(f, fileid=n[:20]))
162
162
163 for f in u1:
163 for f in u1:
164 checkcopies(ctx(dcopies.get(f, f), m1[f]), m2)
164 checkcopies(ctx(dcopies.get(f, f), m1[f]), m2)
165
165
166 for f in u2:
166 for f in u2:
167 checkcopies(ctx(f, m2[f]), m1)
167 checkcopies(ctx(f, m2[f]), m1)
168
168
169 if not fullcopy or not repo.ui.configbool("merge", "followdirs", True):
169 if not fullcopy or not repo.ui.configbool("merge", "followdirs", True):
170 return copy
170 return copy
171
171
172 # generate a directory move map
172 # generate a directory move map
173 d1, d2 = dirs(m1), dirs(m2)
173 d1, d2 = dirs(m1), dirs(m2)
174 invalid = {}
174 invalid = {}
175 dirmove = {}
175 dirmove = {}
176
176
177 for dst, src in fullcopy.items():
177 for dst, src in fullcopy.items():
178 dsrc, ddst = os.path.dirname(src), os.path.dirname(dst)
178 dsrc, ddst = os.path.dirname(src), os.path.dirname(dst)
179 if dsrc in invalid:
179 if dsrc in invalid:
180 continue
180 continue
181 elif (dsrc in d1 and ddst in d1) or (dsrc in d2 and ddst in d2):
181 elif (dsrc in d1 and ddst in d1) or (dsrc in d2 and ddst in d2):
182 invalid[dsrc] = True
182 invalid[dsrc] = True
183 elif dsrc in dirmove and dirmove[dsrc] != ddst:
183 elif dsrc in dirmove and dirmove[dsrc] != ddst:
184 invalid[dsrc] = True
184 invalid[dsrc] = True
185 del dirmove[dsrc]
185 del dirmove[dsrc]
186 else:
186 else:
187 dirmove[dsrc + "/"] = ddst + "/"
187 dirmove[dsrc + "/"] = ddst + "/"
188
188
189 del d1, d2, invalid
189 del d1, d2, invalid
190
190
191 if not dirmove:
191 if not dirmove:
192 return copy
192 return copy
193
193
194 # check unaccounted nonoverlapping files
194 # check unaccounted nonoverlapping files
195 for f in u1 + u2:
195 for f in u1 + u2:
196 if f not in fullcopy:
196 if f not in fullcopy:
197 for d in dirmove:
197 for d in dirmove:
198 if f.startswith(d):
198 if f.startswith(d):
199 copy[f] = dirmove[d] + f[len(d):]
199 copy[f] = dirmove[d] + f[len(d):]
200 break
200 break
201
201
202 return copy
202 return copy
203
203
204 def manifestmerge(repo, p1, p2, pa, overwrite, partial):
204 def manifestmerge(repo, p1, p2, pa, overwrite, partial):
205 """
205 """
206 Merge p1 and p2 with ancestor ma and generate merge action list
206 Merge p1 and p2 with ancestor ma and generate merge action list
207
207
208 overwrite = whether we clobber working files
208 overwrite = whether we clobber working files
209 partial = function to filter file lists
209 partial = function to filter file lists
210 """
210 """
211
211
212 repo.ui.note(_("resolving manifests\n"))
212 repo.ui.note(_("resolving manifests\n"))
213 repo.ui.debug(_(" overwrite %s partial %s\n") % (overwrite, bool(partial)))
213 repo.ui.debug(_(" overwrite %s partial %s\n") % (overwrite, bool(partial)))
214 repo.ui.debug(_(" ancestor %s local %s remote %s\n") % (pa, p1, p2))
214 repo.ui.debug(_(" ancestor %s local %s remote %s\n") % (pa, p1, p2))
215
215
216 m1 = p1.manifest()
216 m1 = p1.manifest()
217 m2 = p2.manifest()
217 m2 = p2.manifest()
218 ma = pa.manifest()
218 ma = pa.manifest()
219 backwards = (pa == p2)
219 backwards = (pa == p2)
220 action = []
220 action = []
221 copy = {}
221 copy = {}
222
222
223 def fmerge(f, f2=None, fa=None):
223 def fmerge(f, f2=None, fa=None):
224 """merge flags"""
224 """merge flags"""
225 if not f2:
225 if not f2:
226 f2 = f
226 f2 = f
227 fa = f
227 fa = f
228 a, b, c = ma.execf(fa), m1.execf(f), m2.execf(f2)
228 a, b, c = ma.execf(fa), m1.execf(f), m2.execf(f2)
229 if ((a^b) | (a^c)) ^ a:
229 if ((a^b) | (a^c)) ^ a:
230 return 'x'
230 return 'x'
231 a, b, c = ma.linkf(fa), m1.linkf(f), m2.linkf(f2)
231 a, b, c = ma.linkf(fa), m1.linkf(f), m2.linkf(f2)
232 if ((a^b) | (a^c)) ^ a:
232 if ((a^b) | (a^c)) ^ a:
233 return 'l'
233 return 'l'
234 return ''
234 return ''
235
235
236 def act(msg, m, f, *args):
236 def act(msg, m, f, *args):
237 repo.ui.debug(" %s: %s -> %s\n" % (f, msg, m))
237 repo.ui.debug(" %s: %s -> %s\n" % (f, msg, m))
238 action.append((f, m) + args)
238 action.append((f, m) + args)
239
239
240 if not (backwards or overwrite):
240 if not (backwards or overwrite):
241 copy = findcopies(repo, m1, m2, ma, pa.rev())
241 copy = findcopies(repo, m1, m2, ma, pa.rev())
242 copied = dict.fromkeys(copy.values())
242 copied = dict.fromkeys(copy.values())
243
243
244 # Compare manifests
244 # Compare manifests
245 for f, n in m1.iteritems():
245 for f, n in m1.iteritems():
246 if partial and not partial(f):
246 if partial and not partial(f):
247 continue
247 continue
248 if f in m2:
248 if f in m2:
249 # are files different?
249 # are files different?
250 if n != m2[f]:
250 if n != m2[f]:
251 a = ma.get(f, nullid)
251 a = ma.get(f, nullid)
252 # are both different from the ancestor?
252 # are both different from the ancestor?
253 if not overwrite and n != a and m2[f] != a:
253 if not overwrite and n != a and m2[f] != a:
254 act("versions differ", "m", f, f, f, fmerge(f), False)
254 act("versions differ", "m", f, f, f, fmerge(f), False)
255 # are we clobbering?
255 # are we clobbering?
256 # is remote's version newer?
256 # is remote's version newer?
257 # or are we going back in time and clean?
257 # or are we going back in time and clean?
258 elif overwrite or m2[f] != a or (backwards and not n[20:]):
258 elif overwrite or m2[f] != a or (backwards and not n[20:]):
259 act("remote is newer", "g", f, m2.flags(f))
259 act("remote is newer", "g", f, m2.flags(f))
260 # local is newer, not overwrite, check mode bits
260 # local is newer, not overwrite, check mode bits
261 elif fmerge(f) != m1.flags(f):
261 elif fmerge(f) != m1.flags(f):
262 act("update permissions", "e", f, m2.flags(f))
262 act("update permissions", "e", f, m2.flags(f))
263 # contents same, check mode bits
263 # contents same, check mode bits
264 elif m1.flags(f) != m2.flags(f):
264 elif m1.flags(f) != m2.flags(f):
265 if overwrite or fmerge(f) != m1.flags(f):
265 if overwrite or fmerge(f) != m1.flags(f):
266 act("update permissions", "e", f, m2.flags(f))
266 act("update permissions", "e", f, m2.flags(f))
267 elif f in copied:
267 elif f in copied:
268 continue
268 continue
269 elif f in copy:
269 elif f in copy:
270 f2 = copy[f]
270 f2 = copy[f]
271 if f2 not in m2: # directory rename
271 if f2 not in m2: # directory rename
272 act("remote renamed directory to " + f2, "d",
272 act("remote renamed directory to " + f2, "d",
273 f, None, f2, m1.flags(f))
273 f, None, f2, m1.flags(f))
274 elif f2 in m1: # case 2 A,B/B/B
274 elif f2 in m1: # case 2 A,B/B/B
275 act("local copied to " + f2, "m",
275 act("local copied to " + f2, "m",
276 f, f2, f, fmerge(f, f2, f2), False)
276 f, f2, f, fmerge(f, f2, f2), False)
277 else: # case 4,21 A/B/B
277 else: # case 4,21 A/B/B
278 act("local moved to " + f2, "m",
278 act("local moved to " + f2, "m",
279 f, f2, f, fmerge(f, f2, f2), False)
279 f, f2, f, fmerge(f, f2, f2), False)
280 elif f in ma:
280 elif f in ma:
281 if n != ma[f] and not overwrite:
281 if n != ma[f] and not overwrite:
282 if repo.ui.prompt(
282 if repo.ui.prompt(
283 (_(" local changed %s which remote deleted\n") % f) +
283 (_(" local changed %s which remote deleted\n") % f) +
284 _("(k)eep or (d)elete?"), _("[kd]"), _("k")) == _("d"):
284 _("(k)eep or (d)elete?"), _("[kd]"), _("k")) == _("d"):
285 act("prompt delete", "r", f)
285 act("prompt delete", "r", f)
286 else:
286 else:
287 act("other deleted", "r", f)
287 act("other deleted", "r", f)
288 else:
288 else:
289 # file is created on branch or in working directory
289 # file is created on branch or in working directory
290 if (overwrite and n[20:] != "u") or (backwards and not n[20:]):
290 if (overwrite and n[20:] != "u") or (backwards and not n[20:]):
291 act("remote deleted", "r", f)
291 act("remote deleted", "r", f)
292
292
293 for f, n in m2.iteritems():
293 for f, n in m2.iteritems():
294 if partial and not partial(f):
294 if partial and not partial(f):
295 continue
295 continue
296 if f in m1:
296 if f in m1:
297 continue
297 continue
298 if f in copied:
298 if f in copied:
299 continue
299 continue
300 if f in copy:
300 if f in copy:
301 f2 = copy[f]
301 f2 = copy[f]
302 if f2 not in m1: # directory rename
302 if f2 not in m1: # directory rename
303 act("local renamed directory to " + f2, "d",
303 act("local renamed directory to " + f2, "d",
304 None, f, f2, m2.flags(f))
304 None, f, f2, m2.flags(f))
305 elif f2 in m2: # rename case 1, A/A,B/A
305 elif f2 in m2: # rename case 1, A/A,B/A
306 act("remote copied to " + f, "m",
306 act("remote copied to " + f, "m",
307 f2, f, f, fmerge(f2, f, f2), False)
307 f2, f, f, fmerge(f2, f, f2), False)
308 else: # case 3,20 A/B/A
308 else: # case 3,20 A/B/A
309 act("remote moved to " + f, "m",
309 act("remote moved to " + f, "m",
310 f2, f, f, fmerge(f2, f, f2), True)
310 f2, f, f, fmerge(f2, f, f2), True)
311 elif f in ma:
311 elif f in ma:
312 if overwrite or backwards:
312 if overwrite or backwards:
313 act("recreating", "g", f, m2.flags(f))
313 act("recreating", "g", f, m2.flags(f))
314 elif n != ma[f]:
314 elif n != ma[f]:
315 if repo.ui.prompt(
315 if repo.ui.prompt(
316 (_("remote changed %s which local deleted\n") % f) +
316 (_("remote changed %s which local deleted\n") % f) +
317 _("(k)eep or (d)elete?"), _("[kd]"), _("k")) == _("k"):
317 _("(k)eep or (d)elete?"), _("[kd]"), _("k")) == _("k"):
318 act("prompt recreating", "g", f, m2.flags(f))
318 act("prompt recreating", "g", f, m2.flags(f))
319 else:
319 else:
320 act("remote created", "g", f, m2.flags(f))
320 act("remote created", "g", f, m2.flags(f))
321
321
322 return action
322 return action
323
323
324 def applyupdates(repo, action, wctx, mctx):
324 def applyupdates(repo, action, wctx, mctx):
325 "apply the merge action list to the working directory"
325 "apply the merge action list to the working directory"
326
326
327 updated, merged, removed, unresolved = 0, 0, 0, 0
327 updated, merged, removed, unresolved = 0, 0, 0, 0
328 action.sort()
328 action.sort()
329 for a in action:
329 for a in action:
330 f, m = a[:2]
330 f, m = a[:2]
331 if f and f[0] == "/":
331 if f and f[0] == "/":
332 continue
332 continue
333 if m == "r": # remove
333 if m == "r": # remove
334 repo.ui.note(_("removing %s\n") % f)
334 repo.ui.note(_("removing %s\n") % f)
335 util.audit_path(f)
335 util.audit_path(f)
336 try:
336 try:
337 util.unlink(repo.wjoin(f))
337 util.unlink(repo.wjoin(f))
338 except OSError, inst:
338 except OSError, inst:
339 if inst.errno != errno.ENOENT:
339 if inst.errno != errno.ENOENT:
340 repo.ui.warn(_("update failed to remove %s: %s!\n") %
340 repo.ui.warn(_("update failed to remove %s: %s!\n") %
341 (f, inst.strerror))
341 (f, inst.strerror))
342 removed += 1
342 removed += 1
343 elif m == "m": # merge
343 elif m == "m": # merge
344 f2, fd, flags, move = a[2:]
344 f2, fd, flags, move = a[2:]
345 r = filemerge(repo, f, f2, wctx, mctx)
345 r = filemerge(repo, f, f2, wctx, mctx)
346 if r > 0:
346 if r > 0:
347 unresolved += 1
347 unresolved += 1
348 else:
348 else:
349 if r is None:
349 if r is None:
350 updated += 1
350 updated += 1
351 else:
351 else:
352 merged += 1
352 merged += 1
353 if f != fd:
353 if f != fd:
354 repo.ui.debug(_("copying %s to %s\n") % (f, fd))
354 repo.ui.debug(_("copying %s to %s\n") % (f, fd))
355 repo.wwrite(fd, repo.wread(f), flags)
355 repo.wwrite(fd, repo.wread(f), flags)
356 if move:
356 if move:
357 repo.ui.debug(_("removing %s\n") % f)
357 repo.ui.debug(_("removing %s\n") % f)
358 os.unlink(repo.wjoin(f))
358 os.unlink(repo.wjoin(f))
359 util.set_exec(repo.wjoin(fd), "x" in flags)
359 util.set_exec(repo.wjoin(fd), "x" in flags)
360 elif m == "g": # get
360 elif m == "g": # get
361 flags = a[2]
361 flags = a[2]
362 repo.ui.note(_("getting %s\n") % f)
362 repo.ui.note(_("getting %s\n") % f)
363 t = mctx.filectx(f).data()
363 t = mctx.filectx(f).data()
364 repo.wwrite(f, t, flags)
364 repo.wwrite(f, t, flags)
365 updated += 1
365 updated += 1
366 elif m == "d": # directory rename
366 elif m == "d": # directory rename
367 f2, fd, flags = a[2:]
367 f2, fd, flags = a[2:]
368 if f:
368 if f:
369 repo.ui.note(_("moving %s to %s\n") % (f, fd))
369 repo.ui.note(_("moving %s to %s\n") % (f, fd))
370 t = wctx.filectx(f).data()
370 t = wctx.filectx(f).data()
371 repo.wwrite(fd, t, flags)
371 repo.wwrite(fd, t, flags)
372 util.unlink(repo.wjoin(f))
372 util.unlink(repo.wjoin(f))
373 if f2:
373 if f2:
374 repo.ui.note(_("getting %s to %s\n") % (f2, fd))
374 repo.ui.note(_("getting %s to %s\n") % (f2, fd))
375 t = mctx.filectx(f2).data()
375 t = mctx.filectx(f2).data()
376 repo.wwrite(fd, t, flags)
376 repo.wwrite(fd, t, flags)
377 updated += 1
377 updated += 1
378 elif m == "e": # exec
378 elif m == "e": # exec
379 flags = a[2]
379 flags = a[2]
380 util.set_exec(repo.wjoin(f), flags)
380 util.set_exec(repo.wjoin(f), flags)
381
381
382 return updated, merged, removed, unresolved
382 return updated, merged, removed, unresolved
383
383
384 def recordupdates(repo, action, branchmerge):
384 def recordupdates(repo, action, branchmerge):
385 "record merge actions to the dirstate"
385 "record merge actions to the dirstate"
386
386
387 for a in action:
387 for a in action:
388 f, m = a[:2]
388 f, m = a[:2]
389 if m == "r": # remove
389 if m == "r": # remove
390 if branchmerge:
390 if branchmerge:
391 repo.dirstate.update([f], 'r')
391 repo.dirstate.update([f], 'r')
392 else:
392 else:
393 repo.dirstate.forget([f])
393 repo.dirstate.forget([f])
394 elif m == "f": # forget
394 elif m == "f": # forget
395 repo.dirstate.forget([f])
395 repo.dirstate.forget([f])
396 elif m == "g": # get
396 elif m == "g": # get
397 if branchmerge:
397 if branchmerge:
398 repo.dirstate.update([f], 'n', st_mtime=-1)
398 repo.dirstate.update([f], 'n', st_mtime=-1)
399 else:
399 else:
400 repo.dirstate.update([f], 'n')
400 repo.dirstate.update([f], 'n')
401 elif m == "m": # merge
401 elif m == "m": # merge
402 f2, fd, flag, move = a[2:]
402 f2, fd, flag, move = a[2:]
403 if branchmerge:
403 if branchmerge:
404 # We've done a branch merge, mark this file as merged
404 # We've done a branch merge, mark this file as merged
405 # so that we properly record the merger later
405 # so that we properly record the merger later
406 repo.dirstate.update([fd], 'm')
406 repo.dirstate.update([fd], 'm')
407 if f != f2: # copy/rename
407 if f != f2: # copy/rename
408 if move:
408 if move:
409 repo.dirstate.update([f], 'r')
409 repo.dirstate.update([f], 'r')
410 if f != fd:
410 if f != fd:
411 repo.dirstate.copy(f, fd)
411 repo.dirstate.copy(f, fd)
412 else:
412 else:
413 repo.dirstate.copy(f2, fd)
413 repo.dirstate.copy(f2, fd)
414 else:
414 else:
415 # We've update-merged a locally modified file, so
415 # We've update-merged a locally modified file, so
416 # we set the dirstate to emulate a normal checkout
416 # we set the dirstate to emulate a normal checkout
417 # of that file some time in the past. Thus our
417 # of that file some time in the past. Thus our
418 # merge will appear as a normal local file
418 # merge will appear as a normal local file
419 # modification.
419 # modification.
420 repo.dirstate.update([fd], 'n', st_size=-1, st_mtime=-1)
420 repo.dirstate.update([fd], 'n', st_size=-1, st_mtime=-1)
421 if move:
421 if move:
422 repo.dirstate.forget([f])
422 repo.dirstate.forget([f])
423 elif m == "d": # directory rename
423 elif m == "d": # directory rename
424 f2, fd, flag = a[2:]
424 f2, fd, flag = a[2:]
425 if branchmerge:
425 if branchmerge:
426 repo.dirstate.update([fd], 'a')
426 repo.dirstate.update([fd], 'a')
427 if f:
427 if f:
428 repo.dirstate.update([f], 'r')
428 repo.dirstate.update([f], 'r')
429 repo.dirstate.copy(f, fd)
429 repo.dirstate.copy(f, fd)
430 if f2:
430 if f2:
431 repo.dirstate.copy(f2, fd)
431 repo.dirstate.copy(f2, fd)
432 else:
432 else:
433 repo.dirstate.update([fd], 'n')
433 repo.dirstate.update([fd], 'n')
434 if f:
434 if f:
435 repo.dirstate.forget([f])
435 repo.dirstate.forget([f])
436
436
437 def update(repo, node, branchmerge, force, partial, wlock):
437 def update(repo, node, branchmerge, force, partial, wlock):
438 """
438 """
439 Perform a merge between the working directory and the given node
439 Perform a merge between the working directory and the given node
440
440
441 branchmerge = whether to merge between branches
441 branchmerge = whether to merge between branches
442 force = whether to force branch merging or file overwriting
442 force = whether to force branch merging or file overwriting
443 partial = a function to filter file lists (dirstate not updated)
443 partial = a function to filter file lists (dirstate not updated)
444 wlock = working dir lock, if already held
444 wlock = working dir lock, if already held
445 """
445 """
446
446
447 if node is None:
448 node = "tip"
449
450 if not wlock:
447 if not wlock:
451 wlock = repo.wlock()
448 wlock = repo.wlock()
452
449
450 wc = repo.workingctx()
451 if node is None:
452 # tip of current branch
453 node = repo.branchtags()[wc.branch()]
453 overwrite = force and not branchmerge
454 overwrite = force and not branchmerge
454 forcemerge = force and branchmerge
455 forcemerge = force and branchmerge
455 wc = repo.workingctx()
456 pl = wc.parents()
456 pl = wc.parents()
457 p1, p2 = pl[0], repo.changectx(node)
457 p1, p2 = pl[0], repo.changectx(node)
458 pa = p1.ancestor(p2)
458 pa = p1.ancestor(p2)
459 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)
459 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)
460
460
461 ### check phase
461 ### check phase
462 if not overwrite and len(pl) > 1:
462 if not overwrite and len(pl) > 1:
463 raise util.Abort(_("outstanding uncommitted merges"))
463 raise util.Abort(_("outstanding uncommitted merges"))
464 if pa == p1 or pa == p2: # is there a linear path from p1 to p2?
464 if pa == p1 or pa == p2: # is there a linear path from p1 to p2?
465 if branchmerge:
465 if branchmerge:
466 raise util.Abort(_("there is nothing to merge, just use "
466 raise util.Abort(_("there is nothing to merge, just use "
467 "'hg update' or look at 'hg heads'"))
467 "'hg update' or look at 'hg heads'"))
468 elif not (overwrite or branchmerge):
468 elif not (overwrite or branchmerge):
469 raise util.Abort(_("update spans branches, use 'hg merge' "
469 raise util.Abort(_("update spans branches, use 'hg merge' "
470 "or 'hg update -C' to lose changes"))
470 "or 'hg update -C' to lose changes"))
471 if branchmerge and not forcemerge:
471 if branchmerge and not forcemerge:
472 if wc.files():
472 if wc.files():
473 raise util.Abort(_("outstanding uncommitted changes"))
473 raise util.Abort(_("outstanding uncommitted changes"))
474
474
475 ### calculate phase
475 ### calculate phase
476 action = []
476 action = []
477 if not force:
477 if not force:
478 checkunknown(wc, p2)
478 checkunknown(wc, p2)
479 if not util.checkfolding(repo.path):
479 if not util.checkfolding(repo.path):
480 checkcollision(p2)
480 checkcollision(p2)
481 if not branchmerge:
481 if not branchmerge:
482 action += forgetremoved(wc, p2)
482 action += forgetremoved(wc, p2)
483 action += manifestmerge(repo, wc, p2, pa, overwrite, partial)
483 action += manifestmerge(repo, wc, p2, pa, overwrite, partial)
484
484
485 ### apply phase
485 ### apply phase
486 if not branchmerge: # just jump to the new rev
486 if not branchmerge: # just jump to the new rev
487 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
487 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
488 if not partial:
488 if not partial:
489 repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
489 repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
490
490
491 stats = applyupdates(repo, action, wc, p2)
491 stats = applyupdates(repo, action, wc, p2)
492
492
493 if not partial:
493 if not partial:
494 recordupdates(repo, action, branchmerge)
494 recordupdates(repo, action, branchmerge)
495 repo.dirstate.setparents(fp1, fp2)
495 repo.dirstate.setparents(fp1, fp2)
496 repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
496 repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
497 if not branchmerge:
497 if not branchmerge:
498 repo.opener("branch", "w").write(p2.branch() + "\n")
498 repo.opener("branch", "w").write(p2.branch() + "\n")
499
499
500 return stats
500 return stats
501
501
@@ -1,379 +1,391 b''
1 #!/bin/sh
1 #!/bin/sh
2
2
3 echo "[extensions]" >> $HGRCPATH
3 echo "[extensions]" >> $HGRCPATH
4 echo "mq=" >> $HGRCPATH
4 echo "mq=" >> $HGRCPATH
5
5
6 echo % help
6 echo % help
7 hg help mq
7 hg help mq
8
8
9 hg init a
9 hg init a
10 cd a
10 cd a
11 echo a > a
11 echo a > a
12 hg ci -Ama
12 hg ci -Ama
13
13
14 hg clone . ../k
14 hg clone . ../k
15
15
16 mkdir b
16 mkdir b
17 echo z > b/z
17 echo z > b/z
18 hg ci -Ama
18 hg ci -Ama
19
19
20 echo % qinit
20 echo % qinit
21
21
22 hg qinit
22 hg qinit
23
23
24 cd ..
24 cd ..
25 hg init b
25 hg init b
26
26
27 echo % -R qinit
27 echo % -R qinit
28
28
29 hg -R b qinit
29 hg -R b qinit
30
30
31 hg init c
31 hg init c
32
32
33 echo % qinit -c
33 echo % qinit -c
34
34
35 hg --cwd c qinit -c
35 hg --cwd c qinit -c
36 hg -R c/.hg/patches st
36 hg -R c/.hg/patches st
37
37
38 echo % qnew implies add
38 echo % qnew implies add
39
39
40 hg -R c qnew test.patch
40 hg -R c qnew test.patch
41 hg -R c/.hg/patches st
41 hg -R c/.hg/patches st
42
42
43 echo '% qinit; qinit -c'
43 echo '% qinit; qinit -c'
44 hg init d
44 hg init d
45 cd d
45 cd d
46 hg qinit
46 hg qinit
47 hg qinit -c
47 hg qinit -c
48 # qinit -c should create both files if they don't exist
48 # qinit -c should create both files if they don't exist
49 echo ' .hgignore:'
49 echo ' .hgignore:'
50 cat .hg/patches/.hgignore
50 cat .hg/patches/.hgignore
51 echo ' series:'
51 echo ' series:'
52 cat .hg/patches/series
52 cat .hg/patches/series
53 hg qinit -c 2>&1 | sed -e 's/repository.*already/repository already/'
53 hg qinit -c 2>&1 | sed -e 's/repository.*already/repository already/'
54 cd ..
54 cd ..
55
55
56 echo '% qinit; <stuff>; qinit -c'
56 echo '% qinit; <stuff>; qinit -c'
57 hg init e
57 hg init e
58 cd e
58 cd e
59 hg qnew A
59 hg qnew A
60 echo foo > foo
60 echo foo > foo
61 hg add foo
61 hg add foo
62 hg qrefresh
62 hg qrefresh
63 hg qnew B
63 hg qnew B
64 echo >> foo
64 echo >> foo
65 hg qrefresh
65 hg qrefresh
66 echo status >> .hg/patches/.hgignore
66 echo status >> .hg/patches/.hgignore
67 echo bleh >> .hg/patches/.hgignore
67 echo bleh >> .hg/patches/.hgignore
68 hg qinit -c
68 hg qinit -c
69 hg -R .hg/patches status
69 hg -R .hg/patches status
70 # qinit -c shouldn't touch these files if they already exist
70 # qinit -c shouldn't touch these files if they already exist
71 echo ' .hgignore:'
71 echo ' .hgignore:'
72 cat .hg/patches/.hgignore
72 cat .hg/patches/.hgignore
73 echo ' series:'
73 echo ' series:'
74 cat .hg/patches/series
74 cat .hg/patches/series
75 cd ..
75 cd ..
76
76
77 cd a
77 cd a
78
78
79 echo % qnew -m
79 echo % qnew -m
80
80
81 hg qnew -m 'foo bar' test.patch
81 hg qnew -m 'foo bar' test.patch
82 cat .hg/patches/test.patch
82 cat .hg/patches/test.patch
83
83
84 echo % qrefresh
84 echo % qrefresh
85
85
86 echo a >> a
86 echo a >> a
87 hg qrefresh
87 hg qrefresh
88 sed -e "s/^\(diff -r \)\([a-f0-9]* \)/\1 x/" \
88 sed -e "s/^\(diff -r \)\([a-f0-9]* \)/\1 x/" \
89 -e "s/\(+++ [a-zA-Z0-9_/.-]*\).*/\1/" \
89 -e "s/\(+++ [a-zA-Z0-9_/.-]*\).*/\1/" \
90 -e "s/\(--- [a-zA-Z0-9_/.-]*\).*/\1/" .hg/patches/test.patch
90 -e "s/\(--- [a-zA-Z0-9_/.-]*\).*/\1/" .hg/patches/test.patch
91
91
92 echo % empty qrefresh
93
94 hg qrefresh -X a
95 echo 'revision:'
96 hg diff -r -2 -r -1
97 echo 'patch:'
98 cat .hg/patches/test.patch
99 echo 'working dir diff:'
100 hg diff --nodates -q
101 # restore things
102 hg qrefresh
103
92 echo % qpop
104 echo % qpop
93
105
94 hg qpop
106 hg qpop
95
107
96 echo % qpush
108 echo % qpush
97
109
98 hg qpush
110 hg qpush
99
111
100 cd ..
112 cd ..
101
113
102 echo % pop/push outside repo
114 echo % pop/push outside repo
103
115
104 hg -R a qpop
116 hg -R a qpop
105 hg -R a qpush
117 hg -R a qpush
106
118
107 cd a
119 cd a
108 hg qnew test2.patch
120 hg qnew test2.patch
109
121
110 echo % qrefresh in subdir
122 echo % qrefresh in subdir
111
123
112 cd b
124 cd b
113 echo a > a
125 echo a > a
114 hg add a
126 hg add a
115 hg qrefresh
127 hg qrefresh
116
128
117 echo % pop/push -a in subdir
129 echo % pop/push -a in subdir
118
130
119 hg qpop -a
131 hg qpop -a
120 hg --traceback qpush -a
132 hg --traceback qpush -a
121
133
122 echo % qseries
134 echo % qseries
123 hg qseries
135 hg qseries
124 hg qpop
136 hg qpop
125 hg qseries -vs
137 hg qseries -vs
126 hg qpush
138 hg qpush
127
139
128 echo % qapplied
140 echo % qapplied
129 hg qapplied
141 hg qapplied
130
142
131 echo % qtop
143 echo % qtop
132 hg qtop
144 hg qtop
133
145
134 echo % qprev
146 echo % qprev
135 hg qprev
147 hg qprev
136
148
137 echo % qnext
149 echo % qnext
138 hg qnext
150 hg qnext
139
151
140 echo % pop, qnext, qprev, qapplied
152 echo % pop, qnext, qprev, qapplied
141 hg qpop
153 hg qpop
142 hg qnext
154 hg qnext
143 hg qprev
155 hg qprev
144 hg qapplied
156 hg qapplied
145
157
146 echo % commit should fail
158 echo % commit should fail
147 hg commit
159 hg commit
148
160
149 echo % push should fail
161 echo % push should fail
150 hg push ../../k
162 hg push ../../k
151
163
152 echo % qunapplied
164 echo % qunapplied
153 hg qunapplied
165 hg qunapplied
154
166
155 echo % qpush/qpop with index
167 echo % qpush/qpop with index
156 hg qnew test1b.patch
168 hg qnew test1b.patch
157 echo 1b > 1b
169 echo 1b > 1b
158 hg add 1b
170 hg add 1b
159 hg qrefresh
171 hg qrefresh
160 hg qpush 2
172 hg qpush 2
161 hg qpop 0
173 hg qpop 0
162 hg qpush test.patch+1
174 hg qpush test.patch+1
163 hg qpush test.patch+2
175 hg qpush test.patch+2
164 hg qpop test2.patch-1
176 hg qpop test2.patch-1
165 hg qpop test2.patch-2
177 hg qpop test2.patch-2
166 hg qpush test1b.patch+1
178 hg qpush test1b.patch+1
167
179
168 echo % push should succeed
180 echo % push should succeed
169 hg qpop -a
181 hg qpop -a
170 hg push ../../k
182 hg push ../../k
171
183
172 echo % qpush/qpop error codes
184 echo % qpush/qpop error codes
173 errorcode()
185 errorcode()
174 {
186 {
175 hg "$@" && echo " $@ succeeds" || echo " $@ fails"
187 hg "$@" && echo " $@ succeeds" || echo " $@ fails"
176 }
188 }
177
189
178 # we want to start with some patches applied
190 # we want to start with some patches applied
179 hg qpush -a
191 hg qpush -a
180 echo " % pops all patches and succeeds"
192 echo " % pops all patches and succeeds"
181 errorcode qpop -a
193 errorcode qpop -a
182 echo " % does nothing and succeeds"
194 echo " % does nothing and succeeds"
183 errorcode qpop -a
195 errorcode qpop -a
184 echo " % fails - nothing else to pop"
196 echo " % fails - nothing else to pop"
185 errorcode qpop
197 errorcode qpop
186 echo " % pushes a patch and succeeds"
198 echo " % pushes a patch and succeeds"
187 errorcode qpush
199 errorcode qpush
188 echo " % pops a patch and succeeds"
200 echo " % pops a patch and succeeds"
189 errorcode qpop
201 errorcode qpop
190 echo " % pushes up to test1b.patch and succeeds"
202 echo " % pushes up to test1b.patch and succeeds"
191 errorcode qpush test1b.patch
203 errorcode qpush test1b.patch
192 echo " % does nothing and succeeds"
204 echo " % does nothing and succeeds"
193 errorcode qpush test1b.patch
205 errorcode qpush test1b.patch
194 echo " % does nothing and succeeds"
206 echo " % does nothing and succeeds"
195 errorcode qpop test1b.patch
207 errorcode qpop test1b.patch
196 echo " % fails - can't push to this patch"
208 echo " % fails - can't push to this patch"
197 errorcode qpush test.patch
209 errorcode qpush test.patch
198 echo " % fails - can't pop to this patch"
210 echo " % fails - can't pop to this patch"
199 errorcode qpop test2.patch
211 errorcode qpop test2.patch
200 echo " % pops up to test.patch and succeeds"
212 echo " % pops up to test.patch and succeeds"
201 errorcode qpop test.patch
213 errorcode qpop test.patch
202 echo " % pushes all patches and succeeds"
214 echo " % pushes all patches and succeeds"
203 errorcode qpush -a
215 errorcode qpush -a
204 echo " % does nothing and succeeds"
216 echo " % does nothing and succeeds"
205 errorcode qpush -a
217 errorcode qpush -a
206 echo " % fails - nothing else to push"
218 echo " % fails - nothing else to push"
207 errorcode qpush
219 errorcode qpush
208 echo " % does nothing and succeeds"
220 echo " % does nothing and succeeds"
209 errorcode qpush test2.patch
221 errorcode qpush test2.patch
210
222
211
223
212 echo % strip
224 echo % strip
213 cd ../../b
225 cd ../../b
214 echo x>x
226 echo x>x
215 hg ci -Ama
227 hg ci -Ama
216 hg strip tip 2>&1 | sed 's/\(saving bundle to \).*/\1/'
228 hg strip tip 2>&1 | sed 's/\(saving bundle to \).*/\1/'
217 hg unbundle .hg/strip-backup/*
229 hg unbundle .hg/strip-backup/*
218
230
219 echo '% cd b; hg qrefresh'
231 echo '% cd b; hg qrefresh'
220 hg init refresh
232 hg init refresh
221 cd refresh
233 cd refresh
222 echo a > a
234 echo a > a
223 hg ci -Ama -d'0 0'
235 hg ci -Ama -d'0 0'
224 hg qnew -mfoo foo
236 hg qnew -mfoo foo
225 echo a >> a
237 echo a >> a
226 hg qrefresh
238 hg qrefresh
227 mkdir b
239 mkdir b
228 cd b
240 cd b
229 echo f > f
241 echo f > f
230 hg add f
242 hg add f
231 hg qrefresh
243 hg qrefresh
232 sed -e "s/\(+++ [a-zA-Z0-9_/.-]*\).*/\1/" \
244 sed -e "s/\(+++ [a-zA-Z0-9_/.-]*\).*/\1/" \
233 -e "s/\(--- [a-zA-Z0-9_/.-]*\).*/\1/" ../.hg/patches/foo
245 -e "s/\(--- [a-zA-Z0-9_/.-]*\).*/\1/" ../.hg/patches/foo
234 echo % hg qrefresh .
246 echo % hg qrefresh .
235 hg qrefresh .
247 hg qrefresh .
236 sed -e "s/\(+++ [a-zA-Z0-9_/.-]*\).*/\1/" \
248 sed -e "s/\(+++ [a-zA-Z0-9_/.-]*\).*/\1/" \
237 -e "s/\(--- [a-zA-Z0-9_/.-]*\).*/\1/" ../.hg/patches/foo
249 -e "s/\(--- [a-zA-Z0-9_/.-]*\).*/\1/" ../.hg/patches/foo
238 hg status
250 hg status
239
251
240 echo % qpush failure
252 echo % qpush failure
241 cd ..
253 cd ..
242 hg qrefresh
254 hg qrefresh
243 hg qnew -mbar bar
255 hg qnew -mbar bar
244 echo foo > foo
256 echo foo > foo
245 echo bar > bar
257 echo bar > bar
246 hg add foo bar
258 hg add foo bar
247 hg qrefresh
259 hg qrefresh
248 hg qpop -a
260 hg qpop -a
249 echo bar > foo
261 echo bar > foo
250 hg qpush -a
262 hg qpush -a
251 hg st
263 hg st
252
264
253 cat >>$HGRCPATH <<EOF
265 cat >>$HGRCPATH <<EOF
254 [diff]
266 [diff]
255 git = True
267 git = True
256 EOF
268 EOF
257 cd ..
269 cd ..
258 hg init git
270 hg init git
259 cd git
271 cd git
260 hg qinit
272 hg qinit
261
273
262 hg qnew -m'new file' new
274 hg qnew -m'new file' new
263 echo foo > new
275 echo foo > new
264 chmod +x new
276 chmod +x new
265 hg add new
277 hg add new
266 hg qrefresh
278 hg qrefresh
267 sed -e "s/\(+++ [a-zA-Z0-9_/.-]*\).*/\1/" \
279 sed -e "s/\(+++ [a-zA-Z0-9_/.-]*\).*/\1/" \
268 -e "s/\(--- [a-zA-Z0-9_/.-]*\).*/\1/" .hg/patches/new
280 -e "s/\(--- [a-zA-Z0-9_/.-]*\).*/\1/" .hg/patches/new
269
281
270 hg qnew -m'copy file' copy
282 hg qnew -m'copy file' copy
271 hg cp new copy
283 hg cp new copy
272 hg qrefresh
284 hg qrefresh
273 sed -e "s/\(+++ [a-zA-Z0-9_/.-]*\).*/\1/" \
285 sed -e "s/\(+++ [a-zA-Z0-9_/.-]*\).*/\1/" \
274 -e "s/\(--- [a-zA-Z0-9_/.-]*\).*/\1/" .hg/patches/copy
286 -e "s/\(--- [a-zA-Z0-9_/.-]*\).*/\1/" .hg/patches/copy
275
287
276 hg qpop
288 hg qpop
277 hg qpush
289 hg qpush
278 hg qdiff
290 hg qdiff
279 cat >>$HGRCPATH <<EOF
291 cat >>$HGRCPATH <<EOF
280 [diff]
292 [diff]
281 git = False
293 git = False
282 EOF
294 EOF
283 hg qdiff --git
295 hg qdiff --git
284
296
285 cd ..
297 cd ..
286 hg init slow
298 hg init slow
287 cd slow
299 cd slow
288 hg qinit
300 hg qinit
289 echo foo > foo
301 echo foo > foo
290 hg add foo
302 hg add foo
291 hg ci -m 'add foo'
303 hg ci -m 'add foo'
292 hg qnew bar
304 hg qnew bar
293 echo bar > bar
305 echo bar > bar
294 hg add bar
306 hg add bar
295 hg mv foo baz
307 hg mv foo baz
296 hg qrefresh --git
308 hg qrefresh --git
297 hg up -C 0
309 hg up -C 0
298 echo >> foo
310 echo >> foo
299 hg ci -m 'change foo'
311 hg ci -m 'change foo'
300 hg up -C 1
312 hg up -C 1
301 hg qrefresh --git 2>&1 | grep -v 'saving bundle'
313 hg qrefresh --git 2>&1 | grep -v 'saving bundle'
302 cat .hg/patches/bar
314 cat .hg/patches/bar
303 hg log -vC --template '{rev} {file_copies%filecopy}\n' -r .
315 hg log -vC --template '{rev} {file_copies%filecopy}\n' -r .
304 hg qrefresh --git
316 hg qrefresh --git
305 cat .hg/patches/bar
317 cat .hg/patches/bar
306 hg log -vC --template '{rev} {file_copies%filecopy}\n' -r .
318 hg log -vC --template '{rev} {file_copies%filecopy}\n' -r .
307
319
308 echo
320 echo
309 hg up -C 1
321 hg up -C 1
310 echo >> foo
322 echo >> foo
311 hg ci -m 'change foo again'
323 hg ci -m 'change foo again'
312 hg up -C 2
324 hg up -C 2
313 hg mv bar quux
325 hg mv bar quux
314 hg mv baz bleh
326 hg mv baz bleh
315 hg qrefresh --git 2>&1 | grep -v 'saving bundle'
327 hg qrefresh --git 2>&1 | grep -v 'saving bundle'
316 cat .hg/patches/bar
328 cat .hg/patches/bar
317 hg log -vC --template '{rev} {file_copies%filecopy}\n' -r .
329 hg log -vC --template '{rev} {file_copies%filecopy}\n' -r .
318 hg mv quux fred
330 hg mv quux fred
319 hg mv bleh barney
331 hg mv bleh barney
320 hg qrefresh --git
332 hg qrefresh --git
321 cat .hg/patches/bar
333 cat .hg/patches/bar
322 hg log -vC --template '{rev} {file_copies%filecopy}\n' -r .
334 hg log -vC --template '{rev} {file_copies%filecopy}\n' -r .
323
335
324 echo '% strip again'
336 echo '% strip again'
325 cd ..
337 cd ..
326 hg init strip
338 hg init strip
327 cd strip
339 cd strip
328 touch foo
340 touch foo
329 hg add foo
341 hg add foo
330 hg ci -m 'add foo' -d '0 0'
342 hg ci -m 'add foo' -d '0 0'
331 echo >> foo
343 echo >> foo
332 hg ci -m 'change foo 1' -d '0 0'
344 hg ci -m 'change foo 1' -d '0 0'
333 hg up -C 0
345 hg up -C 0
334 echo 1 >> foo
346 echo 1 >> foo
335 hg ci -m 'change foo 2' -d '0 0'
347 hg ci -m 'change foo 2' -d '0 0'
336 HGMERGE=true hg merge
348 HGMERGE=true hg merge
337 hg ci -m merge -d '0 0'
349 hg ci -m merge -d '0 0'
338 hg log
350 hg log
339 hg strip 1 2>&1 | sed 's/\(saving bundle to \).*/\1/'
351 hg strip 1 2>&1 | sed 's/\(saving bundle to \).*/\1/'
340 hg log
352 hg log
341 cd ..
353 cd ..
342
354
343 echo '% qclone'
355 echo '% qclone'
344 qlog()
356 qlog()
345 {
357 {
346 echo 'main repo:'
358 echo 'main repo:'
347 hg log --template ' rev {rev}: {desc}\n'
359 hg log --template ' rev {rev}: {desc}\n'
348 echo 'patch repo:'
360 echo 'patch repo:'
349 hg -R .hg/patches log --template ' rev {rev}: {desc}\n'
361 hg -R .hg/patches log --template ' rev {rev}: {desc}\n'
350 }
362 }
351 hg init qclonesource
363 hg init qclonesource
352 cd qclonesource
364 cd qclonesource
353 echo foo > foo
365 echo foo > foo
354 hg add foo
366 hg add foo
355 hg ci -m 'add foo'
367 hg ci -m 'add foo'
356 hg qinit -c
368 hg qinit -c
357 hg qnew patch1
369 hg qnew patch1
358 echo bar >> foo
370 echo bar >> foo
359 hg qrefresh -m 'change foo'
371 hg qrefresh -m 'change foo'
360 hg qci -m checkpoint
372 hg qci -m checkpoint
361 qlog
373 qlog
362 cd ..
374 cd ..
363
375
364 # repo with patches applied
376 # repo with patches applied
365 hg qclone qclonesource qclonedest
377 hg qclone qclonesource qclonedest
366 cd qclonedest
378 cd qclonedest
367 qlog
379 qlog
368 cd ..
380 cd ..
369
381
370 # repo with patches unapplied
382 # repo with patches unapplied
371 cd qclonesource
383 cd qclonesource
372 hg qpop -a
384 hg qpop -a
373 qlog
385 qlog
374 cd ..
386 cd ..
375 hg qclone qclonesource qclonedest2
387 hg qclone qclonesource qclonedest2
376 cd qclonedest2
388 cd qclonedest2
377 qlog
389 qlog
378 cd ..
390 cd ..
379
391
@@ -1,413 +1,424 b''
1 % help
1 % help
2 mq extension - patch management and development
2 mq extension - patch management and development
3
3
4 This extension lets you work with a stack of patches in a Mercurial
4 This extension lets you work with a stack of patches in a Mercurial
5 repository. It manages two stacks of patches - all known patches, and
5 repository. It manages two stacks of patches - all known patches, and
6 applied patches (subset of known patches).
6 applied patches (subset of known patches).
7
7
8 Known patches are represented as patch files in the .hg/patches
8 Known patches are represented as patch files in the .hg/patches
9 directory. Applied patches are both patch files and changesets.
9 directory. Applied patches are both patch files and changesets.
10
10
11 Common tasks (use "hg help command" for more details):
11 Common tasks (use "hg help command" for more details):
12
12
13 prepare repository to work with patches qinit
13 prepare repository to work with patches qinit
14 create new patch qnew
14 create new patch qnew
15 import existing patch qimport
15 import existing patch qimport
16
16
17 print patch series qseries
17 print patch series qseries
18 print applied patches qapplied
18 print applied patches qapplied
19 print name of top applied patch qtop
19 print name of top applied patch qtop
20
20
21 add known patch to applied stack qpush
21 add known patch to applied stack qpush
22 remove patch from applied stack qpop
22 remove patch from applied stack qpop
23 refresh contents of top applied patch qrefresh
23 refresh contents of top applied patch qrefresh
24
24
25 list of commands (use "hg help -v mq" to show aliases and global options):
25 list of commands (use "hg help -v mq" to show aliases and global options):
26
26
27 qapplied print the patches already applied
27 qapplied print the patches already applied
28 qclone clone main and patch repository at same time
28 qclone clone main and patch repository at same time
29 qcommit commit changes in the queue repository
29 qcommit commit changes in the queue repository
30 qdelete remove patches from queue
30 qdelete remove patches from queue
31 qdiff diff of the current patch
31 qdiff diff of the current patch
32 qfold fold the named patches into the current patch
32 qfold fold the named patches into the current patch
33 qguard set or print guards for a patch
33 qguard set or print guards for a patch
34 qheader Print the header of the topmost or specified patch
34 qheader Print the header of the topmost or specified patch
35 qimport import a patch
35 qimport import a patch
36 qinit init a new queue repository
36 qinit init a new queue repository
37 qnew create a new patch
37 qnew create a new patch
38 qnext print the name of the next patch
38 qnext print the name of the next patch
39 qpop pop the current patch off the stack
39 qpop pop the current patch off the stack
40 qprev print the name of the previous patch
40 qprev print the name of the previous patch
41 qpush push the next patch onto the stack
41 qpush push the next patch onto the stack
42 qrefresh update the current patch
42 qrefresh update the current patch
43 qrename rename a patch
43 qrename rename a patch
44 qrestore restore the queue state saved by a rev
44 qrestore restore the queue state saved by a rev
45 qsave save current queue state
45 qsave save current queue state
46 qselect set or print guarded patches to push
46 qselect set or print guarded patches to push
47 qseries print the entire series file
47 qseries print the entire series file
48 qtop print the name of the current patch
48 qtop print the name of the current patch
49 qunapplied print the patches not yet applied
49 qunapplied print the patches not yet applied
50 strip strip a revision and all later revs on the same branch
50 strip strip a revision and all later revs on the same branch
51 adding a
51 adding a
52 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
52 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
53 adding b/z
53 adding b/z
54 % qinit
54 % qinit
55 % -R qinit
55 % -R qinit
56 % qinit -c
56 % qinit -c
57 A .hgignore
57 A .hgignore
58 A series
58 A series
59 % qnew implies add
59 % qnew implies add
60 A .hgignore
60 A .hgignore
61 A series
61 A series
62 A test.patch
62 A test.patch
63 % qinit; qinit -c
63 % qinit; qinit -c
64 .hgignore:
64 .hgignore:
65 syntax: glob
65 syntax: glob
66 status
66 status
67 guards
67 guards
68 series:
68 series:
69 abort: repository already exists!
69 abort: repository already exists!
70 % qinit; <stuff>; qinit -c
70 % qinit; <stuff>; qinit -c
71 adding A
71 adding A
72 adding B
72 adding B
73 A .hgignore
73 A .hgignore
74 A A
74 A A
75 A B
75 A B
76 A series
76 A series
77 .hgignore:
77 .hgignore:
78 status
78 status
79 bleh
79 bleh
80 series:
80 series:
81 A
81 A
82 B
82 B
83 % qnew -m
83 % qnew -m
84 foo bar
84 foo bar
85 % qrefresh
85 % qrefresh
86 foo bar
86 foo bar
87
87
88 diff -r xa
88 diff -r xa
89 --- a/a
89 --- a/a
90 +++ b/a
90 +++ b/a
91 @@ -1,1 +1,2 @@ a
91 @@ -1,1 +1,2 @@ a
92 a
92 a
93 +a
93 +a
94 % empty qrefresh
95 revision:
96 patch:
97 foo bar
98
99 working dir diff:
100 --- a/a
101 +++ b/a
102 @@ -1,1 +1,2 @@ a
103 a
104 +a
94 % qpop
105 % qpop
95 Patch queue now empty
106 Patch queue now empty
96 % qpush
107 % qpush
97 applying test.patch
108 applying test.patch
98 Now at: test.patch
109 Now at: test.patch
99 % pop/push outside repo
110 % pop/push outside repo
100 Patch queue now empty
111 Patch queue now empty
101 applying test.patch
112 applying test.patch
102 Now at: test.patch
113 Now at: test.patch
103 % qrefresh in subdir
114 % qrefresh in subdir
104 % pop/push -a in subdir
115 % pop/push -a in subdir
105 Patch queue now empty
116 Patch queue now empty
106 applying test.patch
117 applying test.patch
107 applying test2.patch
118 applying test2.patch
108 Now at: test2.patch
119 Now at: test2.patch
109 % qseries
120 % qseries
110 test.patch
121 test.patch
111 test2.patch
122 test2.patch
112 Now at: test.patch
123 Now at: test.patch
113 0 A test.patch: foo bar
124 0 A test.patch: foo bar
114 1 U test2.patch:
125 1 U test2.patch:
115 applying test2.patch
126 applying test2.patch
116 Now at: test2.patch
127 Now at: test2.patch
117 % qapplied
128 % qapplied
118 test.patch
129 test.patch
119 test2.patch
130 test2.patch
120 % qtop
131 % qtop
121 test2.patch
132 test2.patch
122 % qprev
133 % qprev
123 test.patch
134 test.patch
124 % qnext
135 % qnext
125 All patches applied
136 All patches applied
126 % pop, qnext, qprev, qapplied
137 % pop, qnext, qprev, qapplied
127 Now at: test.patch
138 Now at: test.patch
128 test2.patch
139 test2.patch
129 Only one patch applied
140 Only one patch applied
130 test.patch
141 test.patch
131 % commit should fail
142 % commit should fail
132 abort: cannot commit over an applied mq patch
143 abort: cannot commit over an applied mq patch
133 % push should fail
144 % push should fail
134 pushing to ../../k
145 pushing to ../../k
135 abort: source has mq patches applied
146 abort: source has mq patches applied
136 % qunapplied
147 % qunapplied
137 test2.patch
148 test2.patch
138 % qpush/qpop with index
149 % qpush/qpop with index
139 applying test2.patch
150 applying test2.patch
140 Now at: test2.patch
151 Now at: test2.patch
141 Now at: test.patch
152 Now at: test.patch
142 applying test1b.patch
153 applying test1b.patch
143 Now at: test1b.patch
154 Now at: test1b.patch
144 applying test2.patch
155 applying test2.patch
145 Now at: test2.patch
156 Now at: test2.patch
146 Now at: test1b.patch
157 Now at: test1b.patch
147 Now at: test.patch
158 Now at: test.patch
148 applying test1b.patch
159 applying test1b.patch
149 applying test2.patch
160 applying test2.patch
150 Now at: test2.patch
161 Now at: test2.patch
151 % push should succeed
162 % push should succeed
152 Patch queue now empty
163 Patch queue now empty
153 pushing to ../../k
164 pushing to ../../k
154 searching for changes
165 searching for changes
155 adding changesets
166 adding changesets
156 adding manifests
167 adding manifests
157 adding file changes
168 adding file changes
158 added 1 changesets with 1 changes to 1 files
169 added 1 changesets with 1 changes to 1 files
159 % qpush/qpop error codes
170 % qpush/qpop error codes
160 applying test.patch
171 applying test.patch
161 applying test1b.patch
172 applying test1b.patch
162 applying test2.patch
173 applying test2.patch
163 Now at: test2.patch
174 Now at: test2.patch
164 % pops all patches and succeeds
175 % pops all patches and succeeds
165 Patch queue now empty
176 Patch queue now empty
166 qpop -a succeeds
177 qpop -a succeeds
167 % does nothing and succeeds
178 % does nothing and succeeds
168 no patches applied
179 no patches applied
169 qpop -a succeeds
180 qpop -a succeeds
170 % fails - nothing else to pop
181 % fails - nothing else to pop
171 no patches applied
182 no patches applied
172 qpop fails
183 qpop fails
173 % pushes a patch and succeeds
184 % pushes a patch and succeeds
174 applying test.patch
185 applying test.patch
175 Now at: test.patch
186 Now at: test.patch
176 qpush succeeds
187 qpush succeeds
177 % pops a patch and succeeds
188 % pops a patch and succeeds
178 Patch queue now empty
189 Patch queue now empty
179 qpop succeeds
190 qpop succeeds
180 % pushes up to test1b.patch and succeeds
191 % pushes up to test1b.patch and succeeds
181 applying test.patch
192 applying test.patch
182 applying test1b.patch
193 applying test1b.patch
183 Now at: test1b.patch
194 Now at: test1b.patch
184 qpush test1b.patch succeeds
195 qpush test1b.patch succeeds
185 % does nothing and succeeds
196 % does nothing and succeeds
186 qpush: test1b.patch is already at the top
197 qpush: test1b.patch is already at the top
187 qpush test1b.patch succeeds
198 qpush test1b.patch succeeds
188 % does nothing and succeeds
199 % does nothing and succeeds
189 qpop: test1b.patch is already at the top
200 qpop: test1b.patch is already at the top
190 qpop test1b.patch succeeds
201 qpop test1b.patch succeeds
191 % fails - can't push to this patch
202 % fails - can't push to this patch
192 abort: cannot push to a previous patch: test.patch
203 abort: cannot push to a previous patch: test.patch
193 qpush test.patch fails
204 qpush test.patch fails
194 % fails - can't pop to this patch
205 % fails - can't pop to this patch
195 abort: patch test2.patch is not applied
206 abort: patch test2.patch is not applied
196 qpop test2.patch fails
207 qpop test2.patch fails
197 % pops up to test.patch and succeeds
208 % pops up to test.patch and succeeds
198 Now at: test.patch
209 Now at: test.patch
199 qpop test.patch succeeds
210 qpop test.patch succeeds
200 % pushes all patches and succeeds
211 % pushes all patches and succeeds
201 applying test1b.patch
212 applying test1b.patch
202 applying test2.patch
213 applying test2.patch
203 Now at: test2.patch
214 Now at: test2.patch
204 qpush -a succeeds
215 qpush -a succeeds
205 % does nothing and succeeds
216 % does nothing and succeeds
206 all patches are currently applied
217 all patches are currently applied
207 qpush -a succeeds
218 qpush -a succeeds
208 % fails - nothing else to push
219 % fails - nothing else to push
209 patch series already fully applied
220 patch series already fully applied
210 qpush fails
221 qpush fails
211 % does nothing and succeeds
222 % does nothing and succeeds
212 all patches are currently applied
223 all patches are currently applied
213 qpush test2.patch succeeds
224 qpush test2.patch succeeds
214 % strip
225 % strip
215 adding x
226 adding x
216 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
227 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
217 saving bundle to
228 saving bundle to
218 adding changesets
229 adding changesets
219 adding manifests
230 adding manifests
220 adding file changes
231 adding file changes
221 added 1 changesets with 1 changes to 1 files
232 added 1 changesets with 1 changes to 1 files
222 (run 'hg update' to get a working copy)
233 (run 'hg update' to get a working copy)
223 % cd b; hg qrefresh
234 % cd b; hg qrefresh
224 adding a
235 adding a
225 foo
236 foo
226
237
227 diff -r cb9a9f314b8b a
238 diff -r cb9a9f314b8b a
228 --- a/a
239 --- a/a
229 +++ b/a
240 +++ b/a
230 @@ -1,1 +1,2 @@ a
241 @@ -1,1 +1,2 @@ a
231 a
242 a
232 +a
243 +a
233 diff -r cb9a9f314b8b b/f
244 diff -r cb9a9f314b8b b/f
234 --- /dev/null
245 --- /dev/null
235 +++ b/b/f
246 +++ b/b/f
236 @@ -0,0 +1,1 @@
247 @@ -0,0 +1,1 @@
237 +f
248 +f
238 % hg qrefresh .
249 % hg qrefresh .
239 foo
250 foo
240
251
241 diff -r cb9a9f314b8b b/f
252 diff -r cb9a9f314b8b b/f
242 --- /dev/null
253 --- /dev/null
243 +++ b/b/f
254 +++ b/b/f
244 @@ -0,0 +1,1 @@
255 @@ -0,0 +1,1 @@
245 +f
256 +f
246 M a
257 M a
247 % qpush failure
258 % qpush failure
248 Patch queue now empty
259 Patch queue now empty
249 applying foo
260 applying foo
250 applying bar
261 applying bar
251 1 out of 1 hunk ignored -- saving rejects to file foo.rej
262 1 out of 1 hunk ignored -- saving rejects to file foo.rej
252 patch failed, unable to continue (try -v)
263 patch failed, unable to continue (try -v)
253 patch failed, rejects left in working dir
264 patch failed, rejects left in working dir
254 Errors during apply, please fix and refresh bar
265 Errors during apply, please fix and refresh bar
255 ? foo
266 ? foo
256 ? foo.rej
267 ? foo.rej
257 new file
268 new file
258
269
259 diff --git a/new b/new
270 diff --git a/new b/new
260 new file mode 100755
271 new file mode 100755
261 --- /dev/null
272 --- /dev/null
262 +++ b/new
273 +++ b/new
263 @@ -0,0 +1,1 @@
274 @@ -0,0 +1,1 @@
264 +foo
275 +foo
265 copy file
276 copy file
266
277
267 diff --git a/new b/copy
278 diff --git a/new b/copy
268 copy from new
279 copy from new
269 copy to copy
280 copy to copy
270 Now at: new
281 Now at: new
271 applying copy
282 applying copy
272 Now at: copy
283 Now at: copy
273 diff --git a/new b/copy
284 diff --git a/new b/copy
274 copy from new
285 copy from new
275 copy to copy
286 copy to copy
276 diff --git a/new b/copy
287 diff --git a/new b/copy
277 copy from new
288 copy from new
278 copy to copy
289 copy to copy
279 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
290 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
280 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
291 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
281 adding branch
292 adding branch
282 adding changesets
293 adding changesets
283 adding manifests
294 adding manifests
284 adding file changes
295 adding file changes
285 added 1 changesets with 1 changes to 1 files
296 added 1 changesets with 1 changes to 1 files
286 (run 'hg update' to get a working copy)
297 (run 'hg update' to get a working copy)
287 Patch queue now empty
298 Patch queue now empty
288 applying bar
299 applying bar
289 Now at: bar
300 Now at: bar
290 diff --git a/bar b/bar
301 diff --git a/bar b/bar
291 new file mode 100644
302 new file mode 100644
292 --- /dev/null
303 --- /dev/null
293 +++ b/bar
304 +++ b/bar
294 @@ -0,0 +1,1 @@
305 @@ -0,0 +1,1 @@
295 +bar
306 +bar
296 diff --git a/foo b/baz
307 diff --git a/foo b/baz
297 rename from foo
308 rename from foo
298 rename to baz
309 rename to baz
299 2 baz (foo)
310 2 baz (foo)
300 diff --git a/bar b/bar
311 diff --git a/bar b/bar
301 new file mode 100644
312 new file mode 100644
302 --- /dev/null
313 --- /dev/null
303 +++ b/bar
314 +++ b/bar
304 @@ -0,0 +1,1 @@
315 @@ -0,0 +1,1 @@
305 +bar
316 +bar
306 diff --git a/foo b/baz
317 diff --git a/foo b/baz
307 rename from foo
318 rename from foo
308 rename to baz
319 rename to baz
309 2 baz (foo)
320 2 baz (foo)
310
321
311 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
322 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
312 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
323 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
313 adding branch
324 adding branch
314 adding changesets
325 adding changesets
315 adding manifests
326 adding manifests
316 adding file changes
327 adding file changes
317 added 1 changesets with 1 changes to 1 files
328 added 1 changesets with 1 changes to 1 files
318 (run 'hg update' to get a working copy)
329 (run 'hg update' to get a working copy)
319 Patch queue now empty
330 Patch queue now empty
320 applying bar
331 applying bar
321 Now at: bar
332 Now at: bar
322 diff --git a/foo b/bleh
333 diff --git a/foo b/bleh
323 rename from foo
334 rename from foo
324 rename to bleh
335 rename to bleh
325 diff --git a/quux b/quux
336 diff --git a/quux b/quux
326 new file mode 100644
337 new file mode 100644
327 --- /dev/null
338 --- /dev/null
328 +++ b/quux
339 +++ b/quux
329 @@ -0,0 +1,1 @@
340 @@ -0,0 +1,1 @@
330 +bar
341 +bar
331 3 bleh (foo)
342 3 bleh (foo)
332 diff --git a/foo b/barney
343 diff --git a/foo b/barney
333 rename from foo
344 rename from foo
334 rename to barney
345 rename to barney
335 diff --git a/fred b/fred
346 diff --git a/fred b/fred
336 new file mode 100644
347 new file mode 100644
337 --- /dev/null
348 --- /dev/null
338 +++ b/fred
349 +++ b/fred
339 @@ -0,0 +1,1 @@
350 @@ -0,0 +1,1 @@
340 +bar
351 +bar
341 3 barney (foo)
352 3 barney (foo)
342 % strip again
353 % strip again
343 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
354 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
344 merging foo
355 merging foo
345 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
356 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
346 (branch merge, don't forget to commit)
357 (branch merge, don't forget to commit)
347 changeset: 3:99615015637b
358 changeset: 3:99615015637b
348 tag: tip
359 tag: tip
349 parent: 2:20cbbe65cff7
360 parent: 2:20cbbe65cff7
350 parent: 1:d2871fc282d4
361 parent: 1:d2871fc282d4
351 user: test
362 user: test
352 date: Thu Jan 01 00:00:00 1970 +0000
363 date: Thu Jan 01 00:00:00 1970 +0000
353 summary: merge
364 summary: merge
354
365
355 changeset: 2:20cbbe65cff7
366 changeset: 2:20cbbe65cff7
356 parent: 0:53245c60e682
367 parent: 0:53245c60e682
357 user: test
368 user: test
358 date: Thu Jan 01 00:00:00 1970 +0000
369 date: Thu Jan 01 00:00:00 1970 +0000
359 summary: change foo 2
370 summary: change foo 2
360
371
361 changeset: 1:d2871fc282d4
372 changeset: 1:d2871fc282d4
362 user: test
373 user: test
363 date: Thu Jan 01 00:00:00 1970 +0000
374 date: Thu Jan 01 00:00:00 1970 +0000
364 summary: change foo 1
375 summary: change foo 1
365
376
366 changeset: 0:53245c60e682
377 changeset: 0:53245c60e682
367 user: test
378 user: test
368 date: Thu Jan 01 00:00:00 1970 +0000
379 date: Thu Jan 01 00:00:00 1970 +0000
369 summary: add foo
380 summary: add foo
370
381
371 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
382 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
372 saving bundle to
383 saving bundle to
373 saving bundle to
384 saving bundle to
374 adding branch
385 adding branch
375 adding changesets
386 adding changesets
376 adding manifests
387 adding manifests
377 adding file changes
388 adding file changes
378 added 1 changesets with 1 changes to 1 files
389 added 1 changesets with 1 changes to 1 files
379 (run 'hg update' to get a working copy)
390 (run 'hg update' to get a working copy)
380 changeset: 1:20cbbe65cff7
391 changeset: 1:20cbbe65cff7
381 tag: tip
392 tag: tip
382 user: test
393 user: test
383 date: Thu Jan 01 00:00:00 1970 +0000
394 date: Thu Jan 01 00:00:00 1970 +0000
384 summary: change foo 2
395 summary: change foo 2
385
396
386 changeset: 0:53245c60e682
397 changeset: 0:53245c60e682
387 user: test
398 user: test
388 date: Thu Jan 01 00:00:00 1970 +0000
399 date: Thu Jan 01 00:00:00 1970 +0000
389 summary: add foo
400 summary: add foo
390
401
391 % qclone
402 % qclone
392 main repo:
403 main repo:
393 rev 1: change foo
404 rev 1: change foo
394 rev 0: add foo
405 rev 0: add foo
395 patch repo:
406 patch repo:
396 rev 0: checkpoint
407 rev 0: checkpoint
397 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
408 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
398 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
409 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
399 main repo:
410 main repo:
400 rev 0: add foo
411 rev 0: add foo
401 patch repo:
412 patch repo:
402 rev 0: checkpoint
413 rev 0: checkpoint
403 Patch queue now empty
414 Patch queue now empty
404 main repo:
415 main repo:
405 rev 0: add foo
416 rev 0: add foo
406 patch repo:
417 patch repo:
407 rev 0: checkpoint
418 rev 0: checkpoint
408 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
419 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
409 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
420 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
410 main repo:
421 main repo:
411 rev 0: add foo
422 rev 0: add foo
412 patch repo:
423 patch repo:
413 rev 0: checkpoint
424 rev 0: checkpoint
@@ -1,65 +1,73 b''
1 #!/bin/sh
1 #!/bin/sh
2
2
3 hg init t
3 hg init t
4 cd t
4 cd t
5 hg branches
5 hg branches
6
6
7 echo foo > a
7 echo foo > a
8 hg add a
8 hg add a
9 hg ci -m "initial" -d "1000000 0"
9 hg ci -m "initial" -d "1000000 0"
10 hg branch foo
10 hg branch foo
11 hg branch
11 hg branch
12 hg ci -m "add branch name" -d "1000000 0"
12 hg ci -m "add branch name" -d "1000000 0"
13 hg branch bar
13 hg branch bar
14 hg ci -m "change branch name" -d "1000000 0"
14 hg ci -m "change branch name" -d "1000000 0"
15 hg branch ""
15 hg branch ""
16 hg ci -m "clear branch name" -d "1000000 0"
16 hg ci -m "clear branch name" -d "1000000 0"
17
17
18 hg co foo
18 hg co foo
19 hg branch
19 hg branch
20 echo bleah > a
20 echo bleah > a
21 hg ci -m "modify a branch" -d "1000000 0"
21 hg ci -m "modify a branch" -d "1000000 0"
22
22
23 hg merge
23 hg merge
24 hg branch
24 hg branch
25 hg ci -m "merge" -d "1000000 0"
25 hg ci -m "merge" -d "1000000 0"
26 hg log
26 hg log
27
27
28 hg branches
28 hg branches
29 hg branches -q
29 hg branches -q
30
30
31 echo % test for invalid branch cache
31 echo % test for invalid branch cache
32 hg rollback
32 hg rollback
33 cp .hg/branches.cache .hg/bc-invalid
33 cp .hg/branches.cache .hg/bc-invalid
34 hg log -r foo
34 hg log -r foo
35 cp .hg/bc-invalid .hg/branches.cache
35 cp .hg/bc-invalid .hg/branches.cache
36 hg --debug log -r foo
36 hg --debug log -r foo
37 rm .hg/branches.cache
37 rm .hg/branches.cache
38 echo corrupted > .hg/branches.cache
38 echo corrupted > .hg/branches.cache
39 hg log -qr foo
39 hg log -qr foo
40 cat .hg/branches.cache
40 cat .hg/branches.cache
41
41
42 echo % test for different branch cache features
42 echo % test for different branch cache features
43 echo '4909a3732169c0c20011c4f4b8fdff4e3d89b23f 4' > .hg/branches.cache
43 echo '4909a3732169c0c20011c4f4b8fdff4e3d89b23f 4' > .hg/branches.cache
44 hg branches --debug
44 hg branches --debug
45 echo ' features: unnamed dummy foo bar' > .hg/branches.cache
45 echo ' features: unnamed dummy foo bar' > .hg/branches.cache
46 hg branches --debug
46 hg branches --debug
47 echo ' features: dummy' > .hg/branches.cache
47 echo ' features: dummy' > .hg/branches.cache
48 hg branches --debug
48 hg branches --debug
49
49
50 echo % test old hg reading branch cache with feature list
50 echo % test old hg reading branch cache with feature list
51 python << EOF
51 python << EOF
52 import binascii
52 import binascii
53 f = file('.hg/branches.cache')
53 f = file('.hg/branches.cache')
54 lines = f.read().split('\n')
54 lines = f.read().split('\n')
55 f.close()
55 f.close()
56 firstline = lines[0]
56 firstline = lines[0]
57 last, lrev = lines.pop(0).rstrip().split(" ", 1)
57 last, lrev = lines.pop(0).rstrip().split(" ", 1)
58 try:
58 try:
59 last, lrev = binascii.unhexlify(last), int(lrev)
59 last, lrev = binascii.unhexlify(last), int(lrev)
60 except ValueError, inst:
60 except ValueError, inst:
61 if str(inst) == "invalid literal for int():%s" % firstline:
61 if str(inst) == "invalid literal for int():%s" % firstline:
62 print "ValueError raised correctly, good."
62 print "ValueError raised correctly, good."
63 else:
63 else:
64 print "ValueError: %s" % inst
64 print "ValueError: %s" % inst
65 EOF
65 EOF
66
67 echo % update with no arguments: tipmost revision of the current branch
68 hg up -q -C 0
69 hg up -q
70 hg id
71 hg up -q 1
72 hg up -q
73 hg id
@@ -1,96 +1,99 b''
1 foo
1 foo
2 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
2 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
3 foo
3 foo
4 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
4 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
5 (branch merge, don't forget to commit)
5 (branch merge, don't forget to commit)
6 foo
6 foo
7 changeset: 5:5f8fb06e083e
7 changeset: 5:5f8fb06e083e
8 branch: foo
8 branch: foo
9 tag: tip
9 tag: tip
10 parent: 4:4909a3732169
10 parent: 4:4909a3732169
11 parent: 3:bf1bc2f45e83
11 parent: 3:bf1bc2f45e83
12 user: test
12 user: test
13 date: Mon Jan 12 13:46:40 1970 +0000
13 date: Mon Jan 12 13:46:40 1970 +0000
14 summary: merge
14 summary: merge
15
15
16 changeset: 4:4909a3732169
16 changeset: 4:4909a3732169
17 branch: foo
17 branch: foo
18 parent: 1:b699b1cec9c2
18 parent: 1:b699b1cec9c2
19 user: test
19 user: test
20 date: Mon Jan 12 13:46:40 1970 +0000
20 date: Mon Jan 12 13:46:40 1970 +0000
21 summary: modify a branch
21 summary: modify a branch
22
22
23 changeset: 3:bf1bc2f45e83
23 changeset: 3:bf1bc2f45e83
24 user: test
24 user: test
25 date: Mon Jan 12 13:46:40 1970 +0000
25 date: Mon Jan 12 13:46:40 1970 +0000
26 summary: clear branch name
26 summary: clear branch name
27
27
28 changeset: 2:67ec16bde7f1
28 changeset: 2:67ec16bde7f1
29 branch: bar
29 branch: bar
30 user: test
30 user: test
31 date: Mon Jan 12 13:46:40 1970 +0000
31 date: Mon Jan 12 13:46:40 1970 +0000
32 summary: change branch name
32 summary: change branch name
33
33
34 changeset: 1:b699b1cec9c2
34 changeset: 1:b699b1cec9c2
35 branch: foo
35 branch: foo
36 user: test
36 user: test
37 date: Mon Jan 12 13:46:40 1970 +0000
37 date: Mon Jan 12 13:46:40 1970 +0000
38 summary: add branch name
38 summary: add branch name
39
39
40 changeset: 0:be8523e69bf8
40 changeset: 0:be8523e69bf8
41 user: test
41 user: test
42 date: Mon Jan 12 13:46:40 1970 +0000
42 date: Mon Jan 12 13:46:40 1970 +0000
43 summary: initial
43 summary: initial
44
44
45 foo 5:5f8fb06e083e
45 foo 5:5f8fb06e083e
46 3:bf1bc2f45e83
46 3:bf1bc2f45e83
47 bar 2:67ec16bde7f1
47 bar 2:67ec16bde7f1
48 foo
48 foo
49
49
50 bar
50 bar
51 % test for invalid branch cache
51 % test for invalid branch cache
52 rolling back last transaction
52 rolling back last transaction
53 changeset: 4:4909a3732169
53 changeset: 4:4909a3732169
54 branch: foo
54 branch: foo
55 tag: tip
55 tag: tip
56 parent: 1:b699b1cec9c2
56 parent: 1:b699b1cec9c2
57 user: test
57 user: test
58 date: Mon Jan 12 13:46:40 1970 +0000
58 date: Mon Jan 12 13:46:40 1970 +0000
59 summary: modify a branch
59 summary: modify a branch
60
60
61 Invalid branch cache: unknown tip
61 Invalid branch cache: unknown tip
62 changeset: 4:4909a3732169c0c20011c4f4b8fdff4e3d89b23f
62 changeset: 4:4909a3732169c0c20011c4f4b8fdff4e3d89b23f
63 branch: foo
63 branch: foo
64 tag: tip
64 tag: tip
65 parent: 1:b699b1cec9c2966b3700de4fef0dc123cd754c31
65 parent: 1:b699b1cec9c2966b3700de4fef0dc123cd754c31
66 parent: -1:0000000000000000000000000000000000000000
66 parent: -1:0000000000000000000000000000000000000000
67 manifest: 4:d01b250baaa05909152f7ae07d7a649deea0df9a
67 manifest: 4:d01b250baaa05909152f7ae07d7a649deea0df9a
68 user: test
68 user: test
69 date: Mon Jan 12 13:46:40 1970 +0000
69 date: Mon Jan 12 13:46:40 1970 +0000
70 files: a
70 files: a
71 extra: branch=foo
71 extra: branch=foo
72 description:
72 description:
73 modify a branch
73 modify a branch
74
74
75
75
76 4:4909a3732169
76 4:4909a3732169
77 features: unnamed
77 features: unnamed
78 4909a3732169c0c20011c4f4b8fdff4e3d89b23f 4
78 4909a3732169c0c20011c4f4b8fdff4e3d89b23f 4
79 bf1bc2f45e834c75404d0ddab57d53beab56e2f8
79 bf1bc2f45e834c75404d0ddab57d53beab56e2f8
80 4909a3732169c0c20011c4f4b8fdff4e3d89b23f foo
80 4909a3732169c0c20011c4f4b8fdff4e3d89b23f foo
81 67ec16bde7f1575d523313b9bca000f6a6f12dca bar
81 67ec16bde7f1575d523313b9bca000f6a6f12dca bar
82 % test for different branch cache features
82 % test for different branch cache features
83 branch cache: no features specified
83 branch cache: no features specified
84 foo 4:4909a3732169c0c20011c4f4b8fdff4e3d89b23f
84 foo 4:4909a3732169c0c20011c4f4b8fdff4e3d89b23f
85 3:bf1bc2f45e834c75404d0ddab57d53beab56e2f8
85 3:bf1bc2f45e834c75404d0ddab57d53beab56e2f8
86 bar 2:67ec16bde7f1575d523313b9bca000f6a6f12dca
86 bar 2:67ec16bde7f1575d523313b9bca000f6a6f12dca
87 branch cache: unknown features: dummy, foo, bar
87 branch cache: unknown features: dummy, foo, bar
88 foo 4:4909a3732169c0c20011c4f4b8fdff4e3d89b23f
88 foo 4:4909a3732169c0c20011c4f4b8fdff4e3d89b23f
89 3:bf1bc2f45e834c75404d0ddab57d53beab56e2f8
89 3:bf1bc2f45e834c75404d0ddab57d53beab56e2f8
90 bar 2:67ec16bde7f1575d523313b9bca000f6a6f12dca
90 bar 2:67ec16bde7f1575d523313b9bca000f6a6f12dca
91 branch cache: missing features: unnamed
91 branch cache: missing features: unnamed
92 foo 4:4909a3732169c0c20011c4f4b8fdff4e3d89b23f
92 foo 4:4909a3732169c0c20011c4f4b8fdff4e3d89b23f
93 3:bf1bc2f45e834c75404d0ddab57d53beab56e2f8
93 3:bf1bc2f45e834c75404d0ddab57d53beab56e2f8
94 bar 2:67ec16bde7f1575d523313b9bca000f6a6f12dca
94 bar 2:67ec16bde7f1575d523313b9bca000f6a6f12dca
95 % test old hg reading branch cache with feature list
95 % test old hg reading branch cache with feature list
96 ValueError raised correctly, good.
96 ValueError raised correctly, good.
97 % update with no arguments: tipmost revision of the current branch
98 bf1bc2f45e83
99 4909a3732169 (foo) tip
General Comments 0
You need to be logged in to leave comments. Login now