##// END OF EJS Templates
merge: forcefully mark files that we get from the second parent as dirty...
Alexis S. L. Carvalho -
r5210:90d9ec0d default
parent child Browse files
Show More
@@ -0,0 +1,31 b''
#!/bin/sh

# In the merge below, the file "foo" has the same contents in both
# parents, but if we look at the file-level history, we'll notice that
# the version in p1 is an ancestor of the version in p2. This test
# makes sure that we'll use the version from p2 in the manifest of the
# merge revision.

hg init repo
cd repo

# rev 0: baseline contents of foo
echo foo > foo
hg ci -d '0 0' -qAm 'add foo'

# rev 1: modify foo
echo bar >> foo
hg ci -d '0 0' -m 'change foo'

# rev 2: backout restores rev 0's text, but as a NEW file revision
# whose filelog ancestor is rev 1's version of foo
hg backout -d '0 0' -r tip -m 'backout changed foo'

# rev 3: unrelated head on top of rev 0; becomes p1 of the merge
hg up -C 0
touch bar
hg ci -d '0 0' -qAm 'add bar'

# both parents have byte-identical foo, but p2's file revision must be
# chosen (p1's is its filelog ancestor); --debug shows the decision,
# debugstate/st show foo marked dirty despite identical contents
hg merge --debug
hg debugstate | grep foo
hg st -A foo
hg ci -d '0 0' -m 'merge'

# the merge manifest must record p2's foo node; dump the filelog index
hg manifest --debug | grep foo
hg debugindex .hg/store/data/foo.i
@@ -0,0 +1,17 b''
1 reverting foo
2 changeset 2:4d9e78aaceee backs out changeset 1:b515023e500e
3 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
4 resolving manifests
5 overwrite None partial False
6 ancestor bbd179dfa0a7 local 71766447bdbb+ remote 4d9e78aaceee
7 foo: remote is newer -> g
8 getting foo
9 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
10 (branch merge, don't forget to commit)
11 n 0 -2 unset foo
12 M foo
13 c6fc755d7e68f49f880599da29f15add41f42f5a 644 foo
14 rev offset length base linkrev nodeid p1 p2
15 0 0 5 0 0 2ed2a3912a0b 000000000000 000000000000
16 1 5 9 1 1 6f4310b00b9a 2ed2a3912a0b 000000000000
17 2 14 5 2 2 c6fc755d7e68 6f4310b00b9a 000000000000
@@ -1,2244 +1,2244 b''
1 # queue.py - patch queues for mercurial
1 # queue.py - patch queues for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 '''patch management and development
8 '''patch management and development
9
9
10 This extension lets you work with a stack of patches in a Mercurial
10 This extension lets you work with a stack of patches in a Mercurial
11 repository. It manages two stacks of patches - all known patches, and
11 repository. It manages two stacks of patches - all known patches, and
12 applied patches (subset of known patches).
12 applied patches (subset of known patches).
13
13
14 Known patches are represented as patch files in the .hg/patches
14 Known patches are represented as patch files in the .hg/patches
15 directory. Applied patches are both patch files and changesets.
15 directory. Applied patches are both patch files and changesets.
16
16
17 Common tasks (use "hg help command" for more details):
17 Common tasks (use "hg help command" for more details):
18
18
19 prepare repository to work with patches qinit
19 prepare repository to work with patches qinit
20 create new patch qnew
20 create new patch qnew
21 import existing patch qimport
21 import existing patch qimport
22
22
23 print patch series qseries
23 print patch series qseries
24 print applied patches qapplied
24 print applied patches qapplied
25 print name of top applied patch qtop
25 print name of top applied patch qtop
26
26
27 add known patch to applied stack qpush
27 add known patch to applied stack qpush
28 remove patch from applied stack qpop
28 remove patch from applied stack qpop
29 refresh contents of top applied patch qrefresh
29 refresh contents of top applied patch qrefresh
30 '''
30 '''
31
31
32 from mercurial.i18n import _
32 from mercurial.i18n import _
33 from mercurial import commands, cmdutil, hg, patch, revlog, util
33 from mercurial import commands, cmdutil, hg, patch, revlog, util
34 from mercurial import repair
34 from mercurial import repair
35 import os, sys, re, errno
35 import os, sys, re, errno
36
36
37 commands.norepo += " qclone qversion"
37 commands.norepo += " qclone qversion"
38
38
39 # Patch names looks like unix-file names.
39 # Patch names looks like unix-file names.
40 # They must be joinable with queue directory and result in the patch path.
40 # They must be joinable with queue directory and result in the patch path.
41 normname = util.normpath
41 normname = util.normpath
42
42
class statusentry:
    """A single entry of mq's status file: a changeset hash plus the
    name of the applied patch, serialized as "rev:name"."""

    def __init__(self, rev, name=None):
        if name:
            # explicit pair given
            self.rev, self.name = rev, name
        else:
            # parse a serialized "rev:name" line; anything without a
            # colon is malformed and yields (None, None)
            fields = rev.split(':', 1)
            if len(fields) == 2:
                self.rev, self.name = fields
            else:
                self.rev, self.name = None, None

    def __str__(self):
        return self.rev + ':' + self.name
56
56
57 class queue:
57 class queue:
    def __init__(self, ui, path, patchdir=None):
        """Initialize a patch queue.

        ui: Mercurial ui object used for all output.
        path: base directory of the queue (presumably the repository's
        .hg directory - confirm at caller); patches live in
        path/patches unless patchdir overrides it.
        Loads the series and status files from disk when present.
        """
        self.basepath = path
        self.path = patchdir or os.path.join(path, "patches")
        self.opener = util.opener(self.path)  # opener rooted at the patch dir
        self.ui = ui
        self.applied = []        # list of statusentry for applied patches
        self.full_series = []    # raw series file lines, comments included
        self.applied_dirty = 0   # status file needs rewriting on save_dirty()
        self.series_dirty = 0    # series file needs rewriting on save_dirty()
        self.series_path = "series"
        self.status_path = "status"
        self.guards_path = "guards"
        self.active_guards = None  # lazily loaded by active()
        self.guards_dirty = False
        self._diffopts = None      # lazily built by diffopts()

        if os.path.exists(self.join(self.series_path)):
            self.full_series = self.opener(self.series_path).read().splitlines()
            self.parse_series()

        if os.path.exists(self.join(self.status_path)):
            lines = self.opener(self.status_path).read().splitlines()
            self.applied = [statusentry(l) for l in lines]
81
81
82 def diffopts(self):
82 def diffopts(self):
83 if self._diffopts is None:
83 if self._diffopts is None:
84 self._diffopts = patch.diffopts(self.ui)
84 self._diffopts = patch.diffopts(self.ui)
85 return self._diffopts
85 return self._diffopts
86
86
87 def join(self, *p):
87 def join(self, *p):
88 return os.path.join(self.path, *p)
88 return os.path.join(self.path, *p)
89
89
90 def find_series(self, patch):
90 def find_series(self, patch):
91 pre = re.compile("(\s*)([^#]+)")
91 pre = re.compile("(\s*)([^#]+)")
92 index = 0
92 index = 0
93 for l in self.full_series:
93 for l in self.full_series:
94 m = pre.match(l)
94 m = pre.match(l)
95 if m:
95 if m:
96 s = m.group(2)
96 s = m.group(2)
97 s = s.rstrip()
97 s = s.rstrip()
98 if s == patch:
98 if s == patch:
99 return index
99 return index
100 index += 1
100 index += 1
101 return None
101 return None
102
102
    # One guard token in a series-file comment: an optional whitespace
    # character, '#', then '+' or '-' followed by the guard name.  The
    # name's first character may not be another +/-, and no character of
    # the name may be '#' or whitespace.
    guard_re = re.compile(r'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')
104
104
105 def parse_series(self):
105 def parse_series(self):
106 self.series = []
106 self.series = []
107 self.series_guards = []
107 self.series_guards = []
108 for l in self.full_series:
108 for l in self.full_series:
109 h = l.find('#')
109 h = l.find('#')
110 if h == -1:
110 if h == -1:
111 patch = l
111 patch = l
112 comment = ''
112 comment = ''
113 elif h == 0:
113 elif h == 0:
114 continue
114 continue
115 else:
115 else:
116 patch = l[:h]
116 patch = l[:h]
117 comment = l[h:]
117 comment = l[h:]
118 patch = patch.strip()
118 patch = patch.strip()
119 if patch:
119 if patch:
120 if patch in self.series:
120 if patch in self.series:
121 raise util.Abort(_('%s appears more than once in %s') %
121 raise util.Abort(_('%s appears more than once in %s') %
122 (patch, self.join(self.series_path)))
122 (patch, self.join(self.series_path)))
123 self.series.append(patch)
123 self.series.append(patch)
124 self.series_guards.append(self.guard_re.findall(comment))
124 self.series_guards.append(self.guard_re.findall(comment))
125
125
126 def check_guard(self, guard):
126 def check_guard(self, guard):
127 bad_chars = '# \t\r\n\f'
127 bad_chars = '# \t\r\n\f'
128 first = guard[0]
128 first = guard[0]
129 for c in '-+':
129 for c in '-+':
130 if first == c:
130 if first == c:
131 return (_('guard %r starts with invalid character: %r') %
131 return (_('guard %r starts with invalid character: %r') %
132 (guard, c))
132 (guard, c))
133 for c in bad_chars:
133 for c in bad_chars:
134 if c in guard:
134 if c in guard:
135 return _('invalid character in guard %r: %r') % (guard, c)
135 return _('invalid character in guard %r: %r') % (guard, c)
136
136
    def set_active(self, guards):
        """Make the given guard names the active guard set.

        Every name is validated with check_guard(); util.Abort is raised
        for the first invalid one.  Duplicates are dropped and the
        result stored sorted.  Marks the guards file dirty so
        save_dirty() writes it back.
        """
        for guard in guards:
            bad = self.check_guard(guard)
            if bad:
                raise util.Abort(bad)
        # de-duplicate; dict.fromkeys(...).keys() is a plain list here
        # (Python 2), so in-place sort() is valid
        guards = dict.fromkeys(guards).keys()
        guards.sort()
        self.ui.debug('active guards: %s\n' % ' '.join(guards))
        self.active_guards = guards
        self.guards_dirty = True
147
147
    def active(self):
        """Return the list of active guards, loading it on first use.

        Reads the guards file via the queue opener; a missing file means
        no active guards.  Invalid guard names found in the file are
        warned about (with file:line) and skipped rather than aborting.
        """
        if self.active_guards is None:
            self.active_guards = []
            try:
                guards = self.opener(self.guards_path).read().split()
            except IOError, err:
                # ENOENT is normal (no guards set); anything else is real
                if err.errno != errno.ENOENT: raise
                guards = []
            for i, guard in enumerate(guards):
                bad = self.check_guard(guard)
                if bad:
                    self.ui.warn('%s:%d: %s\n' %
                                 (self.join(self.guards_path), i + 1, bad))
                else:
                    self.active_guards.append(guard)
        return self.active_guards
164
164
165 def set_guards(self, idx, guards):
165 def set_guards(self, idx, guards):
166 for g in guards:
166 for g in guards:
167 if len(g) < 2:
167 if len(g) < 2:
168 raise util.Abort(_('guard %r too short') % g)
168 raise util.Abort(_('guard %r too short') % g)
169 if g[0] not in '-+':
169 if g[0] not in '-+':
170 raise util.Abort(_('guard %r starts with invalid char') % g)
170 raise util.Abort(_('guard %r starts with invalid char') % g)
171 bad = self.check_guard(g[1:])
171 bad = self.check_guard(g[1:])
172 if bad:
172 if bad:
173 raise util.Abort(bad)
173 raise util.Abort(bad)
174 drop = self.guard_re.sub('', self.full_series[idx])
174 drop = self.guard_re.sub('', self.full_series[idx])
175 self.full_series[idx] = drop + ''.join([' #' + g for g in guards])
175 self.full_series[idx] = drop + ''.join([' #' + g for g in guards])
176 self.parse_series()
176 self.parse_series()
177 self.series_dirty = True
177 self.series_dirty = True
178
178
179 def pushable(self, idx):
179 def pushable(self, idx):
180 if isinstance(idx, str):
180 if isinstance(idx, str):
181 idx = self.series.index(idx)
181 idx = self.series.index(idx)
182 patchguards = self.series_guards[idx]
182 patchguards = self.series_guards[idx]
183 if not patchguards:
183 if not patchguards:
184 return True, None
184 return True, None
185 default = False
185 default = False
186 guards = self.active()
186 guards = self.active()
187 exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
187 exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
188 if exactneg:
188 if exactneg:
189 return False, exactneg[0]
189 return False, exactneg[0]
190 pos = [g for g in patchguards if g[0] == '+']
190 pos = [g for g in patchguards if g[0] == '+']
191 exactpos = [g for g in pos if g[1:] in guards]
191 exactpos = [g for g in pos if g[1:] in guards]
192 if pos:
192 if pos:
193 if exactpos:
193 if exactpos:
194 return True, exactpos[0]
194 return True, exactpos[0]
195 return False, pos
195 return False, pos
196 return True, ''
196 return True, ''
197
197
198 def explain_pushable(self, idx, all_patches=False):
198 def explain_pushable(self, idx, all_patches=False):
199 write = all_patches and self.ui.write or self.ui.warn
199 write = all_patches and self.ui.write or self.ui.warn
200 if all_patches or self.ui.verbose:
200 if all_patches or self.ui.verbose:
201 if isinstance(idx, str):
201 if isinstance(idx, str):
202 idx = self.series.index(idx)
202 idx = self.series.index(idx)
203 pushable, why = self.pushable(idx)
203 pushable, why = self.pushable(idx)
204 if all_patches and pushable:
204 if all_patches and pushable:
205 if why is None:
205 if why is None:
206 write(_('allowing %s - no guards in effect\n') %
206 write(_('allowing %s - no guards in effect\n') %
207 self.series[idx])
207 self.series[idx])
208 else:
208 else:
209 if not why:
209 if not why:
210 write(_('allowing %s - no matching negative guards\n') %
210 write(_('allowing %s - no matching negative guards\n') %
211 self.series[idx])
211 self.series[idx])
212 else:
212 else:
213 write(_('allowing %s - guarded by %r\n') %
213 write(_('allowing %s - guarded by %r\n') %
214 (self.series[idx], why))
214 (self.series[idx], why))
215 if not pushable:
215 if not pushable:
216 if why:
216 if why:
217 write(_('skipping %s - guarded by %r\n') %
217 write(_('skipping %s - guarded by %r\n') %
218 (self.series[idx], why))
218 (self.series[idx], why))
219 else:
219 else:
220 write(_('skipping %s - no matching guards\n') %
220 write(_('skipping %s - no matching guards\n') %
221 self.series[idx])
221 self.series[idx])
222
222
    def save_dirty(self):
        """Write back whichever of the status, series and guards files
        were marked dirty since load."""
        def write_list(items, path):
            # one item per line, through the queue-relative opener
            fp = self.opener(path, 'w')
            for i in items:
                print >> fp, i
            fp.close()
        if self.applied_dirty: write_list(map(str, self.applied), self.status_path)
        if self.series_dirty: write_list(self.full_series, self.series_path)
        if self.guards_dirty: write_list(self.active_guards, self.guards_path)
232
232
    def readheaders(self, patch):
        """Parse the header of patch (a file in the patch directory).

        Returns (message, comments, user, date, patchfound):
        message  - commit message lines (list)
        comments - everything that precedes the diff (list)
        user     - from '# User' (hg export) or 'From:' (mail) headers
        date     - from '# Date' (hg export) headers
        patchfound - True when an actual diff was seen in the file
        """
        def eatdiff(lines):
            # drop trailing diff-leader lines that leaked into the text
            while lines:
                l = lines[-1]
                if (l.startswith("diff -") or
                    l.startswith("Index:") or
                    l.startswith("===========")):
                    del lines[-1]
                else:
                    break
        def eatempty(lines):
            # drop trailing blank lines
            while lines:
                l = lines[-1]
                if re.match('\s*$', l):
                    del lines[-1]
                else:
                    break

        pf = self.join(patch)
        message = []
        comments = []
        user = None
        date = None
        format = None    # parser state: None/"hgpatch"/"tag"/"tagdone"
        subject = None
        diffstart = 0    # 0 none, 1 saw '--- ', 2 confirmed diff

        for line in file(pf):
            line = line.rstrip()
            if line.startswith('diff --git'):
                # git-style diff: unambiguous start of the patch body
                diffstart = 2
                break
            if diffstart:
                # previous line was '--- '; '+++ ' confirms a real diff
                if line.startswith('+++ '):
                    diffstart = 2
                break
            if line.startswith("--- "):
                diffstart = 1
                continue
            elif format == "hgpatch":
                # parse values when importing the result of an hg export
                if line.startswith("# User "):
                    user = line[7:]
                elif line.startswith("# Date "):
                    date = line[7:]
                elif not line.startswith("# ") and line:
                    # first non-header line ends the hg header block
                    message.append(line)
                    format = None
            elif line == '# HG changeset patch':
                format = "hgpatch"
            elif (format != "tagdone" and (line.startswith("Subject: ") or
                                           line.startswith("subject: "))):
                subject = line[9:]
                format = "tag"
            elif (format != "tagdone" and (line.startswith("From: ") or
                                           line.startswith("from: "))):
                user = line[6:]
                format = "tag"
            elif format == "tag" and line == "":
                # when looking for tags (subject: from: etc) they
                # end once you find a blank line in the source
                format = "tagdone"
            elif message or line:
                message.append(line)
            comments.append(line)

        eatdiff(message)
        eatdiff(comments)
        eatempty(message)
        eatempty(comments)

        # make sure message isn't empty
        if format and format.startswith("tag") and subject:
            message.insert(0, "")
            message.insert(0, subject)
        return (message, comments, user, date, diffstart > 1)
309
309
    def removeundo(self, repo):
        """Delete the repository's undo file, if present.

        mq rewrites history, so a stale undo file would let a later
        'hg rollback' corrupt queue state.  A failed unlink is only
        warned about, never fatal.
        """
        undo = repo.sjoin('undo')
        if not os.path.exists(undo):
            return
        try:
            os.unlink(undo)
        except OSError, inst:
            self.ui.warn('error removing undo: %s\n' % str(inst))
318
318
319 def printdiff(self, repo, node1, node2=None, files=None,
319 def printdiff(self, repo, node1, node2=None, files=None,
320 fp=None, changes=None, opts={}):
320 fp=None, changes=None, opts={}):
321 fns, matchfn, anypats = cmdutil.matchpats(repo, files, opts)
321 fns, matchfn, anypats = cmdutil.matchpats(repo, files, opts)
322
322
323 patch.diff(repo, node1, node2, fns, match=matchfn,
323 patch.diff(repo, node1, node2, fns, match=matchfn,
324 fp=fp, changes=changes, opts=self.diffopts())
324 fp=fp, changes=changes, opts=self.diffopts())
325
325
    def mergeone(self, repo, mergeq, head, patch, rev):
        """Bring one patch from mergeq into this queue, merging if needed.

        First tries a strict apply of the patch on top of head.  If that
        fails, the partial result is stripped and a real merge with rev
        (the patch's changeset in mergeq) is committed instead, and the
        patch file is regenerated from the merge result.
        Returns (err, node) where node is the new head.
        """
        # first try just applying the patch
        (err, n) = self.apply(repo, [ patch ], update_status=False,
                              strict=True, merge=rev)

        if err == 0:
            return (err, n)

        if n is None:
            raise util.Abort(_("apply failed for patch %s") % patch)

        self.ui.warn("patch didn't work out, merging %s\n" % patch)

        # apply failed, strip away that rev and merge.
        hg.clean(repo, head)
        self.strip(repo, n, update=False, backup='strip')

        # merge rev into the stripped state and commit with rev's metadata
        ctx = repo.changectx(rev)
        ret = hg.merge(repo, rev)
        if ret:
            raise util.Abort(_("update returned %d") % ret)
        n = repo.commit(None, ctx.description(), ctx.user(), force=1)
        if n == None:
            raise util.Abort(_("repo commit failed"))
        try:
            message, comments, user, date, patchfound = mergeq.readheaders(patch)
        except:
            raise util.Abort(_("unable to read %s") % patch)

        # rewrite the patch file so it matches the merged result
        patchf = self.opener(patch, "w")
        if comments:
            comments = "\n".join(comments) + '\n\n'
            patchf.write(comments)
        self.printdiff(repo, head, n, fp=patchf)
        patchf.close()
        self.removeundo(repo)
        return (0, n)
363
363
364 def qparents(self, repo, rev=None):
364 def qparents(self, repo, rev=None):
365 if rev is None:
365 if rev is None:
366 (p1, p2) = repo.dirstate.parents()
366 (p1, p2) = repo.dirstate.parents()
367 if p2 == revlog.nullid:
367 if p2 == revlog.nullid:
368 return p1
368 return p1
369 if len(self.applied) == 0:
369 if len(self.applied) == 0:
370 return None
370 return None
371 return revlog.bin(self.applied[-1].rev)
371 return revlog.bin(self.applied[-1].rev)
372 pp = repo.changelog.parents(rev)
372 pp = repo.changelog.parents(rev)
373 if pp[1] != revlog.nullid:
373 if pp[1] != revlog.nullid:
374 arevs = [ x.rev for x in self.applied ]
374 arevs = [ x.rev for x in self.applied ]
375 p0 = revlog.hex(pp[0])
375 p0 = revlog.hex(pp[0])
376 p1 = revlog.hex(pp[1])
376 p1 = revlog.hex(pp[1])
377 if p0 in arevs:
377 if p0 in arevs:
378 return pp[0]
378 return pp[0]
379 if p1 in arevs:
379 if p1 in arevs:
380 return pp[1]
380 return pp[1]
381 return pp[0]
381 return pp[0]
382
382
    def mergepatch(self, repo, mergeq, series):
        """Merge every patch of series from mergeq into this queue.

        Returns (err, head) where head is the last node created; stops
        at the first patch that cannot be looked up, is not applied in
        mergeq, or fails to merge.
        """
        if len(self.applied) == 0:
            # each of the patches merged in will have two parents.  This
            # can confuse the qrefresh, qdiff, and strip code because it
            # needs to know which parent is actually in the patch queue.
            # so, we insert a merge marker with only one parent.  This way
            # the first patch in the queue is never a merge patch
            #
            pname = ".hg.patches.merge.marker"
            n = repo.commit(None, '[mq]: merge marker', user=None, force=1)
            self.removeundo(repo)
            self.applied.append(statusentry(revlog.hex(n), pname))
            self.applied_dirty = 1

        head = self.qparents(repo)

        for patch in series:
            patch = mergeq.lookup(patch, strict=True)
            if not patch:
                self.ui.warn("patch %s does not exist\n" % patch)
                return (1, None)
            pushable, reason = self.pushable(patch)
            if not pushable:
                # guarded patches are skipped, not errors
                self.explain_pushable(patch, all_patches=True)
                continue
            info = mergeq.isapplied(patch)
            if not info:
                self.ui.warn("patch %s is not applied\n" % patch)
                return (1, None)
            rev = revlog.bin(info[1])
            (err, head) = self.mergeone(repo, mergeq, head, patch, rev)
            if head:
                self.applied.append(statusentry(revlog.hex(head), patch))
                self.applied_dirty = 1
            if err:
                return (err, head)
        self.save_dirty()
        return (0, head)
421
421
    def patch(self, repo, patchfile):
        '''Apply patchfile to the working directory.
        patchfile: file name of patch

        Returns (success, files, fuzz): whether the patch applied, the
        dict of files it touched (filled in even on failure), and
        whether it applied with fuzz.'''
        files = {}
        try:
            fuzz = patch.patch(patchfile, self.ui, strip=1, cwd=repo.root,
                               files=files)
        except Exception, inst:
            # patch errors are reported, not propagated; -v shows detail
            self.ui.note(str(inst) + '\n')
            if not self.ui.verbose:
                self.ui.warn("patch failed, unable to continue (try -v)\n")
            return (False, files, False)

        return (True, files, fuzz)
436
436
    def apply(self, repo, series, list=False, update_status=True,
              strict=False, patchdir=None, merge=None, all_files={}):
        """Apply patches inside the repo locks and one transaction.

        Locking wrapper around _apply(): takes wlock, lock and a
        transaction, commits the transaction and saves queue state on
        success; on any exception the transaction is aborted and the
        in-memory repo/dirstate caches invalidated before re-raising.
        NOTE(review): the mutable default all_files={} is shared across
        calls that omit the argument - kept for compatibility.
        """
        wlock = lock = tr = None
        try:
            wlock = repo.wlock()
            lock = repo.lock()
            tr = repo.transaction()
            try:
                ret = self._apply(repo, series, list, update_status,
                                  strict, patchdir, merge, all_files=all_files)
                tr.close()
                self.save_dirty()
                return ret
            except:
                try:
                    tr.abort()
                finally:
                    # drop any half-written caches before propagating
                    repo.invalidate()
                    repo.dirstate.invalidate()
                raise
        finally:
            # release transaction and locks in reverse acquisition order
            del tr, lock, wlock
459
459
    def _apply(self, repo, series, list=False, update_status=True,
               strict=False, patchdir=None, merge=None, all_files={}):
        """Apply each pushable patch of series and commit it.

        Stops at the first failure (unreadable patch, rejects, or fuzz
        under strict).  Returns (err, n) with n the last committed node.
        The caller (apply) holds the locks and the transaction.
        """
        # TODO unify with commands.py
        if not patchdir:
            patchdir = self.path
        err = 0
        n = None
        for patchname in series:
            pushable, reason = self.pushable(patchname)
            if not pushable:
                # guarded patch: explain and move on
                self.explain_pushable(patchname, all_patches=True)
                continue
            self.ui.warn("applying %s\n" % patchname)
            pf = os.path.join(patchdir, patchname)

            try:
                message, comments, user, date, patchfound = self.readheaders(patchname)
            except:
                self.ui.warn("Unable to read %s\n" % patchname)
                err = 1
                break

            if not message:
                # patches without a message get a synthesized one
                message = "imported patch %s\n" % patchname
            else:
                if list:
                    message.append("\nimported patch %s" % patchname)
                message = '\n'.join(message)

            (patcherr, files, fuzz) = self.patch(repo, pf)
            all_files.update(files)
            patcherr = not patcherr

            if merge and files:
                # Mark as removed/merged and update dirstate parent info
                removed = []
                merged = []
                for f in files:
                    if os.path.exists(repo.wjoin(f)):
                        merged.append(f)
                    else:
                        removed.append(f)
                for f in removed:
                    repo.dirstate.remove(f)
                for f in merged:
                    repo.dirstate.merge(f)
                p1, p2 = repo.dirstate.parents()
                repo.dirstate.setparents(p1, merge)
            files = patch.updatedir(self.ui, repo, files)
            n = repo.commit(files, message, user, date, force=1)

            if n == None:
                raise util.Abort(_("repo commit failed"))

            if update_status:
                self.applied.append(statusentry(revlog.hex(n), patchname))

            if patcherr:
                if not patchfound:
                    # an empty patch still counts as applied
                    self.ui.warn("patch %s is empty\n" % patchname)
                    err = 0
                else:
                    self.ui.warn("patch failed, rejects left in working dir\n")
                    err = 1
                break

            if fuzz and strict:
                self.ui.warn("fuzz found when applying patch, stopping\n")
                err = 1
                break
        self.removeundo(repo)
        return (err, n)
532
532
533 def delete(self, repo, patches, opts):
533 def delete(self, repo, patches, opts):
534 if not patches and not opts.get('rev'):
534 if not patches and not opts.get('rev'):
535 raise util.Abort(_('qdelete requires at least one revision or '
535 raise util.Abort(_('qdelete requires at least one revision or '
536 'patch name'))
536 'patch name'))
537
537
538 realpatches = []
538 realpatches = []
539 for patch in patches:
539 for patch in patches:
540 patch = self.lookup(patch, strict=True)
540 patch = self.lookup(patch, strict=True)
541 info = self.isapplied(patch)
541 info = self.isapplied(patch)
542 if info:
542 if info:
543 raise util.Abort(_("cannot delete applied patch %s") % patch)
543 raise util.Abort(_("cannot delete applied patch %s") % patch)
544 if patch not in self.series:
544 if patch not in self.series:
545 raise util.Abort(_("patch %s not in series file") % patch)
545 raise util.Abort(_("patch %s not in series file") % patch)
546 realpatches.append(patch)
546 realpatches.append(patch)
547
547
548 appliedbase = 0
548 appliedbase = 0
549 if opts.get('rev'):
549 if opts.get('rev'):
550 if not self.applied:
550 if not self.applied:
551 raise util.Abort(_('no patches applied'))
551 raise util.Abort(_('no patches applied'))
552 revs = cmdutil.revrange(repo, opts['rev'])
552 revs = cmdutil.revrange(repo, opts['rev'])
553 if len(revs) > 1 and revs[0] > revs[1]:
553 if len(revs) > 1 and revs[0] > revs[1]:
554 revs.reverse()
554 revs.reverse()
555 for rev in revs:
555 for rev in revs:
556 if appliedbase >= len(self.applied):
556 if appliedbase >= len(self.applied):
557 raise util.Abort(_("revision %d is not managed") % rev)
557 raise util.Abort(_("revision %d is not managed") % rev)
558
558
559 base = revlog.bin(self.applied[appliedbase].rev)
559 base = revlog.bin(self.applied[appliedbase].rev)
560 node = repo.changelog.node(rev)
560 node = repo.changelog.node(rev)
561 if node != base:
561 if node != base:
562 raise util.Abort(_("cannot delete revision %d above "
562 raise util.Abort(_("cannot delete revision %d above "
563 "applied patches") % rev)
563 "applied patches") % rev)
564 realpatches.append(self.applied[appliedbase].name)
564 realpatches.append(self.applied[appliedbase].name)
565 appliedbase += 1
565 appliedbase += 1
566
566
567 if not opts.get('keep'):
567 if not opts.get('keep'):
568 r = self.qrepo()
568 r = self.qrepo()
569 if r:
569 if r:
570 r.remove(realpatches, True)
570 r.remove(realpatches, True)
571 else:
571 else:
572 for p in realpatches:
572 for p in realpatches:
573 os.unlink(self.join(p))
573 os.unlink(self.join(p))
574
574
575 if appliedbase:
575 if appliedbase:
576 del self.applied[:appliedbase]
576 del self.applied[:appliedbase]
577 self.applied_dirty = 1
577 self.applied_dirty = 1
578 indices = [self.find_series(p) for p in realpatches]
578 indices = [self.find_series(p) for p in realpatches]
579 indices.sort()
579 indices.sort()
580 for i in indices[-1::-1]:
580 for i in indices[-1::-1]:
581 del self.full_series[i]
581 del self.full_series[i]
582 self.parse_series()
582 self.parse_series()
583 self.series_dirty = 1
583 self.series_dirty = 1
584
584
585 def check_toppatch(self, repo):
585 def check_toppatch(self, repo):
586 if len(self.applied) > 0:
586 if len(self.applied) > 0:
587 top = revlog.bin(self.applied[-1].rev)
587 top = revlog.bin(self.applied[-1].rev)
588 pp = repo.dirstate.parents()
588 pp = repo.dirstate.parents()
589 if top not in pp:
589 if top not in pp:
590 raise util.Abort(_("queue top not at same revision as working directory"))
590 raise util.Abort(_("queue top not at same revision as working directory"))
591 return top
591 return top
592 return None
592 return None
593 def check_localchanges(self, repo, force=False, refresh=True):
593 def check_localchanges(self, repo, force=False, refresh=True):
594 m, a, r, d = repo.status()[:4]
594 m, a, r, d = repo.status()[:4]
595 if m or a or r or d:
595 if m or a or r or d:
596 if not force:
596 if not force:
597 if refresh:
597 if refresh:
598 raise util.Abort(_("local changes found, refresh first"))
598 raise util.Abort(_("local changes found, refresh first"))
599 else:
599 else:
600 raise util.Abort(_("local changes found"))
600 raise util.Abort(_("local changes found"))
601 return m, a, r, d
601 return m, a, r, d
602
602
603 def new(self, repo, patch, *pats, **opts):
603 def new(self, repo, patch, *pats, **opts):
604 msg = opts.get('msg')
604 msg = opts.get('msg')
605 force = opts.get('force')
605 force = opts.get('force')
606 if os.path.exists(self.join(patch)):
606 if os.path.exists(self.join(patch)):
607 raise util.Abort(_('patch "%s" already exists') % patch)
607 raise util.Abort(_('patch "%s" already exists') % patch)
608 if opts.get('include') or opts.get('exclude') or pats:
608 if opts.get('include') or opts.get('exclude') or pats:
609 fns, match, anypats = cmdutil.matchpats(repo, pats, opts)
609 fns, match, anypats = cmdutil.matchpats(repo, pats, opts)
610 m, a, r, d = repo.status(files=fns, match=match)[:4]
610 m, a, r, d = repo.status(files=fns, match=match)[:4]
611 else:
611 else:
612 m, a, r, d = self.check_localchanges(repo, force)
612 m, a, r, d = self.check_localchanges(repo, force)
613 commitfiles = m + a + r
613 commitfiles = m + a + r
614 self.check_toppatch(repo)
614 self.check_toppatch(repo)
615 wlock = repo.wlock()
615 wlock = repo.wlock()
616 try:
616 try:
617 insert = self.full_series_end()
617 insert = self.full_series_end()
618 if msg:
618 if msg:
619 n = repo.commit(commitfiles, msg, force=True)
619 n = repo.commit(commitfiles, msg, force=True)
620 else:
620 else:
621 n = repo.commit(commitfiles, "[mq]: %s" % patch, force=True)
621 n = repo.commit(commitfiles, "[mq]: %s" % patch, force=True)
622 if n == None:
622 if n == None:
623 raise util.Abort(_("repo commit failed"))
623 raise util.Abort(_("repo commit failed"))
624 self.full_series[insert:insert] = [patch]
624 self.full_series[insert:insert] = [patch]
625 self.applied.append(statusentry(revlog.hex(n), patch))
625 self.applied.append(statusentry(revlog.hex(n), patch))
626 self.parse_series()
626 self.parse_series()
627 self.series_dirty = 1
627 self.series_dirty = 1
628 self.applied_dirty = 1
628 self.applied_dirty = 1
629 p = self.opener(patch, "w")
629 p = self.opener(patch, "w")
630 if msg:
630 if msg:
631 msg = msg + "\n"
631 msg = msg + "\n"
632 p.write(msg)
632 p.write(msg)
633 p.close()
633 p.close()
634 wlock = None
634 wlock = None
635 r = self.qrepo()
635 r = self.qrepo()
636 if r: r.add([patch])
636 if r: r.add([patch])
637 if commitfiles:
637 if commitfiles:
638 self.refresh(repo, short=True, git=opts.get('git'))
638 self.refresh(repo, short=True, git=opts.get('git'))
639 self.removeundo(repo)
639 self.removeundo(repo)
640 finally:
640 finally:
641 del wlock
641 del wlock
642
642
643 def strip(self, repo, rev, update=True, backup="all"):
643 def strip(self, repo, rev, update=True, backup="all"):
644 wlock = lock = None
644 wlock = lock = None
645 try:
645 try:
646 wlock = repo.wlock()
646 wlock = repo.wlock()
647 lock = repo.lock()
647 lock = repo.lock()
648
648
649 if update:
649 if update:
650 self.check_localchanges(repo, refresh=False)
650 self.check_localchanges(repo, refresh=False)
651 urev = self.qparents(repo, rev)
651 urev = self.qparents(repo, rev)
652 hg.clean(repo, urev)
652 hg.clean(repo, urev)
653 repo.dirstate.write()
653 repo.dirstate.write()
654
654
655 self.removeundo(repo)
655 self.removeundo(repo)
656 repair.strip(self.ui, repo, rev, backup)
656 repair.strip(self.ui, repo, rev, backup)
657 finally:
657 finally:
658 del lock, wlock
658 del lock, wlock
659
659
660 def isapplied(self, patch):
660 def isapplied(self, patch):
661 """returns (index, rev, patch)"""
661 """returns (index, rev, patch)"""
662 for i in xrange(len(self.applied)):
662 for i in xrange(len(self.applied)):
663 a = self.applied[i]
663 a = self.applied[i]
664 if a.name == patch:
664 if a.name == patch:
665 return (i, a.rev, a.name)
665 return (i, a.rev, a.name)
666 return None
666 return None
667
667
668 # if the exact patch name does not exist, we try a few
668 # if the exact patch name does not exist, we try a few
669 # variations. If strict is passed, we try only #1
669 # variations. If strict is passed, we try only #1
670 #
670 #
671 # 1) a number to indicate an offset in the series file
671 # 1) a number to indicate an offset in the series file
672 # 2) a unique substring of the patch name was given
672 # 2) a unique substring of the patch name was given
673 # 3) patchname[-+]num to indicate an offset in the series file
673 # 3) patchname[-+]num to indicate an offset in the series file
674 def lookup(self, patch, strict=False):
674 def lookup(self, patch, strict=False):
675 patch = patch and str(patch)
675 patch = patch and str(patch)
676
676
677 def partial_name(s):
677 def partial_name(s):
678 if s in self.series:
678 if s in self.series:
679 return s
679 return s
680 matches = [x for x in self.series if s in x]
680 matches = [x for x in self.series if s in x]
681 if len(matches) > 1:
681 if len(matches) > 1:
682 self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
682 self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
683 for m in matches:
683 for m in matches:
684 self.ui.warn(' %s\n' % m)
684 self.ui.warn(' %s\n' % m)
685 return None
685 return None
686 if matches:
686 if matches:
687 return matches[0]
687 return matches[0]
688 if len(self.series) > 0 and len(self.applied) > 0:
688 if len(self.series) > 0 and len(self.applied) > 0:
689 if s == 'qtip':
689 if s == 'qtip':
690 return self.series[self.series_end(True)-1]
690 return self.series[self.series_end(True)-1]
691 if s == 'qbase':
691 if s == 'qbase':
692 return self.series[0]
692 return self.series[0]
693 return None
693 return None
694 if patch == None:
694 if patch == None:
695 return None
695 return None
696
696
697 # we don't want to return a partial match until we make
697 # we don't want to return a partial match until we make
698 # sure the file name passed in does not exist (checked below)
698 # sure the file name passed in does not exist (checked below)
699 res = partial_name(patch)
699 res = partial_name(patch)
700 if res and res == patch:
700 if res and res == patch:
701 return res
701 return res
702
702
703 if not os.path.isfile(self.join(patch)):
703 if not os.path.isfile(self.join(patch)):
704 try:
704 try:
705 sno = int(patch)
705 sno = int(patch)
706 except(ValueError, OverflowError):
706 except(ValueError, OverflowError):
707 pass
707 pass
708 else:
708 else:
709 if sno < len(self.series):
709 if sno < len(self.series):
710 return self.series[sno]
710 return self.series[sno]
711 if not strict:
711 if not strict:
712 # return any partial match made above
712 # return any partial match made above
713 if res:
713 if res:
714 return res
714 return res
715 minus = patch.rfind('-')
715 minus = patch.rfind('-')
716 if minus >= 0:
716 if minus >= 0:
717 res = partial_name(patch[:minus])
717 res = partial_name(patch[:minus])
718 if res:
718 if res:
719 i = self.series.index(res)
719 i = self.series.index(res)
720 try:
720 try:
721 off = int(patch[minus+1:] or 1)
721 off = int(patch[minus+1:] or 1)
722 except(ValueError, OverflowError):
722 except(ValueError, OverflowError):
723 pass
723 pass
724 else:
724 else:
725 if i - off >= 0:
725 if i - off >= 0:
726 return self.series[i - off]
726 return self.series[i - off]
727 plus = patch.rfind('+')
727 plus = patch.rfind('+')
728 if plus >= 0:
728 if plus >= 0:
729 res = partial_name(patch[:plus])
729 res = partial_name(patch[:plus])
730 if res:
730 if res:
731 i = self.series.index(res)
731 i = self.series.index(res)
732 try:
732 try:
733 off = int(patch[plus+1:] or 1)
733 off = int(patch[plus+1:] or 1)
734 except(ValueError, OverflowError):
734 except(ValueError, OverflowError):
735 pass
735 pass
736 else:
736 else:
737 if i + off < len(self.series):
737 if i + off < len(self.series):
738 return self.series[i + off]
738 return self.series[i + off]
739 raise util.Abort(_("patch %s not in series") % patch)
739 raise util.Abort(_("patch %s not in series") % patch)
740
740
741 def push(self, repo, patch=None, force=False, list=False,
741 def push(self, repo, patch=None, force=False, list=False,
742 mergeq=None):
742 mergeq=None):
743 wlock = repo.wlock()
743 wlock = repo.wlock()
744 try:
744 try:
745 patch = self.lookup(patch)
745 patch = self.lookup(patch)
746 # Suppose our series file is: A B C and the current 'top'
746 # Suppose our series file is: A B C and the current 'top'
747 # patch is B. qpush C should be performed (moving forward)
747 # patch is B. qpush C should be performed (moving forward)
748 # qpush B is a NOP (no change) qpush A is an error (can't
748 # qpush B is a NOP (no change) qpush A is an error (can't
749 # go backwards with qpush)
749 # go backwards with qpush)
750 if patch:
750 if patch:
751 info = self.isapplied(patch)
751 info = self.isapplied(patch)
752 if info:
752 if info:
753 if info[0] < len(self.applied) - 1:
753 if info[0] < len(self.applied) - 1:
754 raise util.Abort(
754 raise util.Abort(
755 _("cannot push to a previous patch: %s") % patch)
755 _("cannot push to a previous patch: %s") % patch)
756 if info[0] < len(self.series) - 1:
756 if info[0] < len(self.series) - 1:
757 self.ui.warn(
757 self.ui.warn(
758 _('qpush: %s is already at the top\n') % patch)
758 _('qpush: %s is already at the top\n') % patch)
759 else:
759 else:
760 self.ui.warn(_('all patches are currently applied\n'))
760 self.ui.warn(_('all patches are currently applied\n'))
761 return
761 return
762
762
763 # Following the above example, starting at 'top' of B:
763 # Following the above example, starting at 'top' of B:
764 # qpush should be performed (pushes C), but a subsequent
764 # qpush should be performed (pushes C), but a subsequent
765 # qpush without an argument is an error (nothing to
765 # qpush without an argument is an error (nothing to
766 # apply). This allows a loop of "...while hg qpush..." to
766 # apply). This allows a loop of "...while hg qpush..." to
767 # work as it detects an error when done
767 # work as it detects an error when done
768 if self.series_end() == len(self.series):
768 if self.series_end() == len(self.series):
769 self.ui.warn(_('patch series already fully applied\n'))
769 self.ui.warn(_('patch series already fully applied\n'))
770 return 1
770 return 1
771 if not force:
771 if not force:
772 self.check_localchanges(repo)
772 self.check_localchanges(repo)
773
773
774 self.applied_dirty = 1;
774 self.applied_dirty = 1;
775 start = self.series_end()
775 start = self.series_end()
776 if start > 0:
776 if start > 0:
777 self.check_toppatch(repo)
777 self.check_toppatch(repo)
778 if not patch:
778 if not patch:
779 patch = self.series[start]
779 patch = self.series[start]
780 end = start + 1
780 end = start + 1
781 else:
781 else:
782 end = self.series.index(patch, start) + 1
782 end = self.series.index(patch, start) + 1
783 s = self.series[start:end]
783 s = self.series[start:end]
784 all_files = {}
784 all_files = {}
785 try:
785 try:
786 if mergeq:
786 if mergeq:
787 ret = self.mergepatch(repo, mergeq, s)
787 ret = self.mergepatch(repo, mergeq, s)
788 else:
788 else:
789 ret = self.apply(repo, s, list, all_files=all_files)
789 ret = self.apply(repo, s, list, all_files=all_files)
790 except:
790 except:
791 self.ui.warn(_('cleaning up working directory...'))
791 self.ui.warn(_('cleaning up working directory...'))
792 node = repo.dirstate.parents()[0]
792 node = repo.dirstate.parents()[0]
793 hg.revert(repo, node, None)
793 hg.revert(repo, node, None)
794 unknown = repo.status()[4]
794 unknown = repo.status()[4]
795 # only remove unknown files that we know we touched or
795 # only remove unknown files that we know we touched or
796 # created while patching
796 # created while patching
797 for f in unknown:
797 for f in unknown:
798 if f in all_files:
798 if f in all_files:
799 util.unlink(repo.wjoin(f))
799 util.unlink(repo.wjoin(f))
800 self.ui.warn(_('done\n'))
800 self.ui.warn(_('done\n'))
801 raise
801 raise
802 top = self.applied[-1].name
802 top = self.applied[-1].name
803 if ret[0]:
803 if ret[0]:
804 self.ui.write(
804 self.ui.write(
805 "Errors during apply, please fix and refresh %s\n" % top)
805 "Errors during apply, please fix and refresh %s\n" % top)
806 else:
806 else:
807 self.ui.write("Now at: %s\n" % top)
807 self.ui.write("Now at: %s\n" % top)
808 return ret[0]
808 return ret[0]
809 finally:
809 finally:
810 del wlock
810 del wlock
811
811
812 def pop(self, repo, patch=None, force=False, update=True, all=False):
812 def pop(self, repo, patch=None, force=False, update=True, all=False):
813 def getfile(f, rev, flags):
813 def getfile(f, rev, flags):
814 t = repo.file(f).read(rev)
814 t = repo.file(f).read(rev)
815 repo.wwrite(f, t, flags)
815 repo.wwrite(f, t, flags)
816
816
817 wlock = repo.wlock()
817 wlock = repo.wlock()
818 try:
818 try:
819 if patch:
819 if patch:
820 # index, rev, patch
820 # index, rev, patch
821 info = self.isapplied(patch)
821 info = self.isapplied(patch)
822 if not info:
822 if not info:
823 patch = self.lookup(patch)
823 patch = self.lookup(patch)
824 info = self.isapplied(patch)
824 info = self.isapplied(patch)
825 if not info:
825 if not info:
826 raise util.Abort(_("patch %s is not applied") % patch)
826 raise util.Abort(_("patch %s is not applied") % patch)
827
827
828 if len(self.applied) == 0:
828 if len(self.applied) == 0:
829 # Allow qpop -a to work repeatedly,
829 # Allow qpop -a to work repeatedly,
830 # but not qpop without an argument
830 # but not qpop without an argument
831 self.ui.warn(_("no patches applied\n"))
831 self.ui.warn(_("no patches applied\n"))
832 return not all
832 return not all
833
833
834 if not update:
834 if not update:
835 parents = repo.dirstate.parents()
835 parents = repo.dirstate.parents()
836 rr = [ revlog.bin(x.rev) for x in self.applied ]
836 rr = [ revlog.bin(x.rev) for x in self.applied ]
837 for p in parents:
837 for p in parents:
838 if p in rr:
838 if p in rr:
839 self.ui.warn("qpop: forcing dirstate update\n")
839 self.ui.warn("qpop: forcing dirstate update\n")
840 update = True
840 update = True
841
841
842 if not force and update:
842 if not force and update:
843 self.check_localchanges(repo)
843 self.check_localchanges(repo)
844
844
845 self.applied_dirty = 1;
845 self.applied_dirty = 1;
846 end = len(self.applied)
846 end = len(self.applied)
847 if not patch:
847 if not patch:
848 if all:
848 if all:
849 popi = 0
849 popi = 0
850 else:
850 else:
851 popi = len(self.applied) - 1
851 popi = len(self.applied) - 1
852 else:
852 else:
853 popi = info[0] + 1
853 popi = info[0] + 1
854 if popi >= end:
854 if popi >= end:
855 self.ui.warn("qpop: %s is already at the top\n" % patch)
855 self.ui.warn("qpop: %s is already at the top\n" % patch)
856 return
856 return
857 info = [ popi ] + [self.applied[popi].rev, self.applied[popi].name]
857 info = [ popi ] + [self.applied[popi].rev, self.applied[popi].name]
858
858
859 start = info[0]
859 start = info[0]
860 rev = revlog.bin(info[1])
860 rev = revlog.bin(info[1])
861
861
862 # we know there are no local changes, so we can make a simplified
862 # we know there are no local changes, so we can make a simplified
863 # form of hg.update.
863 # form of hg.update.
864 if update:
864 if update:
865 top = self.check_toppatch(repo)
865 top = self.check_toppatch(repo)
866 qp = self.qparents(repo, rev)
866 qp = self.qparents(repo, rev)
867 changes = repo.changelog.read(qp)
867 changes = repo.changelog.read(qp)
868 mmap = repo.manifest.read(changes[0])
868 mmap = repo.manifest.read(changes[0])
869 m, a, r, d, u = repo.status(qp, top)[:5]
869 m, a, r, d, u = repo.status(qp, top)[:5]
870 if d:
870 if d:
871 raise util.Abort("deletions found between repo revs")
871 raise util.Abort("deletions found between repo revs")
872 for f in m:
872 for f in m:
873 getfile(f, mmap[f], mmap.flags(f))
873 getfile(f, mmap[f], mmap.flags(f))
874 for f in r:
874 for f in r:
875 getfile(f, mmap[f], mmap.flags(f))
875 getfile(f, mmap[f], mmap.flags(f))
876 for f in m + r:
876 for f in m + r:
877 repo.dirstate.normal(f)
877 repo.dirstate.normal(f)
878 for f in a:
878 for f in a:
879 try:
879 try:
880 os.unlink(repo.wjoin(f))
880 os.unlink(repo.wjoin(f))
881 except OSError, e:
881 except OSError, e:
882 if e.errno != errno.ENOENT:
882 if e.errno != errno.ENOENT:
883 raise
883 raise
884 try: os.removedirs(os.path.dirname(repo.wjoin(f)))
884 try: os.removedirs(os.path.dirname(repo.wjoin(f)))
885 except: pass
885 except: pass
886 repo.dirstate.forget(f)
886 repo.dirstate.forget(f)
887 repo.dirstate.setparents(qp, revlog.nullid)
887 repo.dirstate.setparents(qp, revlog.nullid)
888 self.strip(repo, rev, update=False, backup='strip')
888 self.strip(repo, rev, update=False, backup='strip')
889 del self.applied[start:end]
889 del self.applied[start:end]
890 if len(self.applied):
890 if len(self.applied):
891 self.ui.write("Now at: %s\n" % self.applied[-1].name)
891 self.ui.write("Now at: %s\n" % self.applied[-1].name)
892 else:
892 else:
893 self.ui.write("Patch queue now empty\n")
893 self.ui.write("Patch queue now empty\n")
894 finally:
894 finally:
895 del wlock
895 del wlock
896
896
897 def diff(self, repo, pats, opts):
897 def diff(self, repo, pats, opts):
898 top = self.check_toppatch(repo)
898 top = self.check_toppatch(repo)
899 if not top:
899 if not top:
900 self.ui.write("No patches applied\n")
900 self.ui.write("No patches applied\n")
901 return
901 return
902 qp = self.qparents(repo, top)
902 qp = self.qparents(repo, top)
903 if opts.get('git'):
903 if opts.get('git'):
904 self.diffopts().git = True
904 self.diffopts().git = True
905 self.printdiff(repo, qp, files=pats, opts=opts)
905 self.printdiff(repo, qp, files=pats, opts=opts)
906
906
907 def refresh(self, repo, pats=None, **opts):
907 def refresh(self, repo, pats=None, **opts):
908 if len(self.applied) == 0:
908 if len(self.applied) == 0:
909 self.ui.write("No patches applied\n")
909 self.ui.write("No patches applied\n")
910 return 1
910 return 1
911 wlock = repo.wlock()
911 wlock = repo.wlock()
912 try:
912 try:
913 self.check_toppatch(repo)
913 self.check_toppatch(repo)
914 (top, patchfn) = (self.applied[-1].rev, self.applied[-1].name)
914 (top, patchfn) = (self.applied[-1].rev, self.applied[-1].name)
915 top = revlog.bin(top)
915 top = revlog.bin(top)
916 cparents = repo.changelog.parents(top)
916 cparents = repo.changelog.parents(top)
917 patchparent = self.qparents(repo, top)
917 patchparent = self.qparents(repo, top)
918 message, comments, user, date, patchfound = self.readheaders(patchfn)
918 message, comments, user, date, patchfound = self.readheaders(patchfn)
919
919
920 patchf = self.opener(patchfn, 'r+')
920 patchf = self.opener(patchfn, 'r+')
921
921
922 # if the patch was a git patch, refresh it as a git patch
922 # if the patch was a git patch, refresh it as a git patch
923 for line in patchf:
923 for line in patchf:
924 if line.startswith('diff --git'):
924 if line.startswith('diff --git'):
925 self.diffopts().git = True
925 self.diffopts().git = True
926 break
926 break
927
927
928 msg = opts.get('msg', '').rstrip()
928 msg = opts.get('msg', '').rstrip()
929 if msg:
929 if msg:
930 if comments:
930 if comments:
931 # Remove existing message.
931 # Remove existing message.
932 ci = 0
932 ci = 0
933 subj = None
933 subj = None
934 for mi in xrange(len(message)):
934 for mi in xrange(len(message)):
935 if comments[ci].lower().startswith('subject: '):
935 if comments[ci].lower().startswith('subject: '):
936 subj = comments[ci][9:]
936 subj = comments[ci][9:]
937 while message[mi] != comments[ci] and message[mi] != subj:
937 while message[mi] != comments[ci] and message[mi] != subj:
938 ci += 1
938 ci += 1
939 del comments[ci]
939 del comments[ci]
940 comments.append(msg)
940 comments.append(msg)
941
941
942 patchf.seek(0)
942 patchf.seek(0)
943 patchf.truncate()
943 patchf.truncate()
944
944
945 if comments:
945 if comments:
946 comments = "\n".join(comments) + '\n\n'
946 comments = "\n".join(comments) + '\n\n'
947 patchf.write(comments)
947 patchf.write(comments)
948
948
949 if opts.get('git'):
949 if opts.get('git'):
950 self.diffopts().git = True
950 self.diffopts().git = True
951 fns, matchfn, anypats = cmdutil.matchpats(repo, pats, opts)
951 fns, matchfn, anypats = cmdutil.matchpats(repo, pats, opts)
952 tip = repo.changelog.tip()
952 tip = repo.changelog.tip()
953 if top == tip:
953 if top == tip:
954 # if the top of our patch queue is also the tip, there is an
954 # if the top of our patch queue is also the tip, there is an
955 # optimization here. We update the dirstate in place and strip
955 # optimization here. We update the dirstate in place and strip
956 # off the tip commit. Then just commit the current directory
956 # off the tip commit. Then just commit the current directory
957 # tree. We can also send repo.commit the list of files
957 # tree. We can also send repo.commit the list of files
958 # changed to speed up the diff
958 # changed to speed up the diff
959 #
959 #
960 # in short mode, we only diff the files included in the
960 # in short mode, we only diff the files included in the
961 # patch already
961 # patch already
962 #
962 #
963 # this should really read:
963 # this should really read:
964 # mm, dd, aa, aa2, uu = repo.status(tip, patchparent)[:5]
964 # mm, dd, aa, aa2, uu = repo.status(tip, patchparent)[:5]
965 # but we do it backwards to take advantage of manifest/chlog
965 # but we do it backwards to take advantage of manifest/chlog
966 # caching against the next repo.status call
966 # caching against the next repo.status call
967 #
967 #
968 mm, aa, dd, aa2, uu = repo.status(patchparent, tip)[:5]
968 mm, aa, dd, aa2, uu = repo.status(patchparent, tip)[:5]
969 changes = repo.changelog.read(tip)
969 changes = repo.changelog.read(tip)
970 man = repo.manifest.read(changes[0])
970 man = repo.manifest.read(changes[0])
971 aaa = aa[:]
971 aaa = aa[:]
972 if opts.get('short'):
972 if opts.get('short'):
973 filelist = mm + aa + dd
973 filelist = mm + aa + dd
974 match = dict.fromkeys(filelist).__contains__
974 match = dict.fromkeys(filelist).__contains__
975 else:
975 else:
976 filelist = None
976 filelist = None
977 match = util.always
977 match = util.always
978 m, a, r, d, u = repo.status(files=filelist, match=match)[:5]
978 m, a, r, d, u = repo.status(files=filelist, match=match)[:5]
979
979
980 # we might end up with files that were added between
980 # we might end up with files that were added between
981 # tip and the dirstate parent, but then changed in the
981 # tip and the dirstate parent, but then changed in the
982 # local dirstate. in this case, we want them to only
982 # local dirstate. in this case, we want them to only
983 # show up in the added section
983 # show up in the added section
984 for x in m:
984 for x in m:
985 if x not in aa:
985 if x not in aa:
986 mm.append(x)
986 mm.append(x)
987 # we might end up with files added by the local dirstate that
987 # we might end up with files added by the local dirstate that
988 # were deleted by the patch. In this case, they should only
988 # were deleted by the patch. In this case, they should only
989 # show up in the changed section.
989 # show up in the changed section.
990 for x in a:
990 for x in a:
991 if x in dd:
991 if x in dd:
992 del dd[dd.index(x)]
992 del dd[dd.index(x)]
993 mm.append(x)
993 mm.append(x)
994 else:
994 else:
995 aa.append(x)
995 aa.append(x)
996 # make sure any files deleted in the local dirstate
996 # make sure any files deleted in the local dirstate
997 # are not in the add or change column of the patch
997 # are not in the add or change column of the patch
998 forget = []
998 forget = []
999 for x in d + r:
999 for x in d + r:
1000 if x in aa:
1000 if x in aa:
1001 del aa[aa.index(x)]
1001 del aa[aa.index(x)]
1002 forget.append(x)
1002 forget.append(x)
1003 continue
1003 continue
1004 elif x in mm:
1004 elif x in mm:
1005 del mm[mm.index(x)]
1005 del mm[mm.index(x)]
1006 dd.append(x)
1006 dd.append(x)
1007
1007
1008 m = util.unique(mm)
1008 m = util.unique(mm)
1009 r = util.unique(dd)
1009 r = util.unique(dd)
1010 a = util.unique(aa)
1010 a = util.unique(aa)
1011 c = [filter(matchfn, l) for l in (m, a, r, [], u)]
1011 c = [filter(matchfn, l) for l in (m, a, r, [], u)]
1012 filelist = util.unique(c[0] + c[1] + c[2])
1012 filelist = util.unique(c[0] + c[1] + c[2])
1013 patch.diff(repo, patchparent, files=filelist, match=matchfn,
1013 patch.diff(repo, patchparent, files=filelist, match=matchfn,
1014 fp=patchf, changes=c, opts=self.diffopts())
1014 fp=patchf, changes=c, opts=self.diffopts())
1015 patchf.close()
1015 patchf.close()
1016
1016
1017 repo.dirstate.setparents(*cparents)
1017 repo.dirstate.setparents(*cparents)
1018 copies = {}
1018 copies = {}
1019 for dst in a:
1019 for dst in a:
1020 src = repo.dirstate.copied(dst)
1020 src = repo.dirstate.copied(dst)
1021 if src is None:
1021 if src is None:
1022 continue
1022 continue
1023 copies.setdefault(src, []).append(dst)
1023 copies.setdefault(src, []).append(dst)
1024 repo.dirstate.add(dst)
1024 repo.dirstate.add(dst)
1025 # remember the copies between patchparent and tip
1025 # remember the copies between patchparent and tip
1026 # this may be slow, so don't do it if we're not tracking copies
1026 # this may be slow, so don't do it if we're not tracking copies
1027 if self.diffopts().git:
1027 if self.diffopts().git:
1028 for dst in aaa:
1028 for dst in aaa:
1029 f = repo.file(dst)
1029 f = repo.file(dst)
1030 src = f.renamed(man[dst])
1030 src = f.renamed(man[dst])
1031 if src:
1031 if src:
1032 copies[src[0]] = copies.get(dst, [])
1032 copies[src[0]] = copies.get(dst, [])
1033 if dst in a:
1033 if dst in a:
1034 copies[src[0]].append(dst)
1034 copies[src[0]].append(dst)
1035 # we can't copy a file created by the patch itself
1035 # we can't copy a file created by the patch itself
1036 if dst in copies:
1036 if dst in copies:
1037 del copies[dst]
1037 del copies[dst]
1038 for src, dsts in copies.iteritems():
1038 for src, dsts in copies.iteritems():
1039 for dst in dsts:
1039 for dst in dsts:
1040 repo.dirstate.copy(src, dst)
1040 repo.dirstate.copy(src, dst)
1041 for f in r:
1041 for f in r:
1042 repo.dirstate.remove(f)
1042 repo.dirstate.remove(f)
1043 # if the patch excludes a modified file, mark that
1043 # if the patch excludes a modified file, mark that
1044 # file with mtime=0 so status can see it.
1044 # file with mtime=0 so status can see it.
1045 mm = []
1045 mm = []
1046 for i in xrange(len(m)-1, -1, -1):
1046 for i in xrange(len(m)-1, -1, -1):
1047 if not matchfn(m[i]):
1047 if not matchfn(m[i]):
1048 mm.append(m[i])
1048 mm.append(m[i])
1049 del m[i]
1049 del m[i]
1050 for f in m:
1050 for f in m:
1051 repo.dirstate.normal(f)
1051 repo.dirstate.normal(f)
1052 for f in mm:
1052 for f in mm:
1053 repo.dirstate.normaldirty(f)
1053 repo.dirstate.normallookup(f)
1054 for f in forget:
1054 for f in forget:
1055 repo.dirstate.forget(f)
1055 repo.dirstate.forget(f)
1056
1056
1057 if not msg:
1057 if not msg:
1058 if not message:
1058 if not message:
1059 message = "[mq]: %s\n" % patchfn
1059 message = "[mq]: %s\n" % patchfn
1060 else:
1060 else:
1061 message = "\n".join(message)
1061 message = "\n".join(message)
1062 else:
1062 else:
1063 message = msg
1063 message = msg
1064
1064
1065 self.strip(repo, top, update=False,
1065 self.strip(repo, top, update=False,
1066 backup='strip')
1066 backup='strip')
1067 n = repo.commit(filelist, message, changes[1], match=matchfn,
1067 n = repo.commit(filelist, message, changes[1], match=matchfn,
1068 force=1)
1068 force=1)
1069 self.applied[-1] = statusentry(revlog.hex(n), patchfn)
1069 self.applied[-1] = statusentry(revlog.hex(n), patchfn)
1070 self.applied_dirty = 1
1070 self.applied_dirty = 1
1071 self.removeundo(repo)
1071 self.removeundo(repo)
1072 else:
1072 else:
1073 self.printdiff(repo, patchparent, fp=patchf)
1073 self.printdiff(repo, patchparent, fp=patchf)
1074 patchf.close()
1074 patchf.close()
1075 added = repo.status()[1]
1075 added = repo.status()[1]
1076 for a in added:
1076 for a in added:
1077 f = repo.wjoin(a)
1077 f = repo.wjoin(a)
1078 try:
1078 try:
1079 os.unlink(f)
1079 os.unlink(f)
1080 except OSError, e:
1080 except OSError, e:
1081 if e.errno != errno.ENOENT:
1081 if e.errno != errno.ENOENT:
1082 raise
1082 raise
1083 try: os.removedirs(os.path.dirname(f))
1083 try: os.removedirs(os.path.dirname(f))
1084 except: pass
1084 except: pass
1085 # forget the file copies in the dirstate
1085 # forget the file copies in the dirstate
1086 # push should readd the files later on
1086 # push should readd the files later on
1087 repo.dirstate.forget(a)
1087 repo.dirstate.forget(a)
1088 self.pop(repo, force=True)
1088 self.pop(repo, force=True)
1089 self.push(repo, force=True)
1089 self.push(repo, force=True)
1090 finally:
1090 finally:
1091 del wlock
1091 del wlock
1092
1092
1093 def init(self, repo, create=False):
1093 def init(self, repo, create=False):
1094 if not create and os.path.isdir(self.path):
1094 if not create and os.path.isdir(self.path):
1095 raise util.Abort(_("patch queue directory already exists"))
1095 raise util.Abort(_("patch queue directory already exists"))
1096 try:
1096 try:
1097 os.mkdir(self.path)
1097 os.mkdir(self.path)
1098 except OSError, inst:
1098 except OSError, inst:
1099 if inst.errno != errno.EEXIST or not create:
1099 if inst.errno != errno.EEXIST or not create:
1100 raise
1100 raise
1101 if create:
1101 if create:
1102 return self.qrepo(create=True)
1102 return self.qrepo(create=True)
1103
1103
def unapplied(self, repo, patch=None):
    """Return (index, patchname) pairs for patches not yet applied.

    With patch given, scanning starts just after it; otherwise it
    starts at the first unapplied position.  Guarded patches are
    skipped (explain_pushable reports why); only pushable ones are
    returned.
    """
    if patch and patch not in self.series:
        raise util.Abort(_("patch %s is not in series file") % patch)
    if patch:
        start = self.series.index(patch) + 1
    else:
        start = self.series_end()
    result = []
    for idx in xrange(start, len(self.series)):
        ok, reason = self.pushable(idx)
        if ok:
            result.append((idx, self.series[idx]))
        self.explain_pushable(idx)
    return result
1118
1118
def qseries(self, repo, missing=None, start=0, length=None, status=None,
            summary=False):
    """Print the series, or (with missing) stray files in the patch dir.

    In verbose mode each patch is shown with its index and state --
    'A'pplied, 'U'npushed or 'G'uarded; status restricts output to one
    state.  summary appends the first line of each patch header.
    """
    def displayname(patchname):
        # patch name, optionally followed by ': <first header line>'
        if summary:
            msg = self.readheaders(patchname)[0]
            msg = msg and ': ' + msg[0] or ': '
        else:
            msg = ''
        return '%s%s' % (patchname, msg)

    applied = dict.fromkeys([p.name for p in self.applied])
    if length is None:
        length = len(self.series) - start
    if not missing:
        for i in xrange(start, start + length):
            patch = self.series[i]
            if patch in applied:
                stat = 'A'
            elif self.pushable(i)[0]:
                stat = 'U'
            else:
                stat = 'G'
            pfx = ''
            if self.ui.verbose:
                pfx = '%d %s ' % (i, stat)
            elif status and status != stat:
                continue
            self.ui.write('%s%s\n' % (pfx, displayname(patch)))
    else:
        # walk the patch directory and report files that are neither
        # mq control files nor listed in the series
        strays = []
        for root, dirs, files in os.walk(self.path):
            d = root[len(self.path) + 1:]
            for f in files:
                fl = os.path.join(d, f)
                if (fl not in self.series and
                    fl not in (self.status_path, self.series_path,
                               self.guards_path)
                    and not fl.startswith('.')):
                    strays.append(fl)
        strays.sort()
        for fl in strays:
            pfx = self.ui.verbose and ('D ') or ''
            self.ui.write("%s%s\n" % (pfx, displayname(fl)))
1162
1162
def issaveline(self, l):
    """Report whether status entry l is the synthetic save marker.

    Save entries (written by save()) are recorded under the reserved
    name '.hg.patches.save.line'.  Returns a real boolean instead of
    the former True/None pair; callers only test truthiness, so this
    is backward compatible.
    """
    return l.name == '.hg.patches.save.line'
1166
1166
def qrepo(self, create=False):
    """Return the versioned queue repository, or None when there is none.

    With create=True the repository is created unconditionally.
    """
    if not create and not os.path.isdir(self.join(".hg")):
        return None
    return hg.repository(self.ui, path=self.path, create=create)
1170
1170
def restore(self, repo, rev, delete=None, qupdate=None):
    """Rebuild queue state from a save changeset created by save().

    The saved series/status are parsed out of the changeset
    description.  With delete the save entry itself is stripped
    afterwards; with qupdate the queue repository is updated to its
    saved parent.  Returns 1 on failure.
    """
    desc = repo.changelog.read(rev)[4].strip()
    lines = desc.splitlines()
    datastart = None
    series = []
    applied = []
    qpp = None
    for i in xrange(0, len(lines)):
        if lines[i] == 'Patch Data:':
            datastart = i + 1
        elif lines[i].startswith('Dirstate:'):
            # saved parents of the queue repository
            l = lines[i].rstrip()
            l = l[10:].split(' ')
            qpp = [ hg.bin(x) for x in l ]
        elif datastart != None:
            l = lines[i].rstrip()
            se = statusentry(l)
            # entries with a revision are applied patches; the rest is
            # the remainder of the series file
            if se.rev:
                applied.append(se)
            else:
                series.append(se.name)
    if datastart == None:
        self.ui.warn("No saved patch data found\n")
        return 1
    self.ui.warn("restoring status: %s\n" % lines[0])
    self.full_series = series
    self.applied = applied
    self.parse_series()
    self.series_dirty = 1
    self.applied_dirty = 1
    heads = repo.changelog.heads()
    if delete:
        if rev not in heads:
            self.ui.warn("save entry has children, leaving it alone\n")
        else:
            self.ui.warn("removing save entry %s\n" % hg.short(rev))
            pp = repo.dirstate.parents()
            update = rev in pp
            self.strip(repo, rev, update=update, backup='strip')
    if qpp:
        self.ui.warn("saved queue repository parents: %s %s\n" %
                     (hg.short(qpp[0]), hg.short(qpp[1])))
        if qupdate:
            print("queue directory updating")
            r = self.qrepo()
            if not r:
                self.ui.warn("Unable to load queue repository\n")
                return 1
            hg.clean(r, qpp[0])
1226
1226
def save(self, repo, msg=None):
    """Snapshot the current queue state into a save changeset.

    The applied list and the remaining series are serialized into the
    commit message so restore() can rebuild them later.  Returns 1 on
    error.
    """
    if not self.applied:
        self.ui.warn("save: no patches applied, exiting\n")
        return 1
    if self.issaveline(self.applied[-1]):
        self.ui.warn("status is already saved\n")
        return 1

    # unapplied series entries are serialized with a ':' prefix
    ar = [ ':' + x for x in self.full_series ]
    if msg:
        msg = "hg patches: " + msg.rstrip('\r\n')
    else:
        msg = "hg patches saved state"
    r = self.qrepo()
    if r:
        pp = r.dirstate.parents()
        msg += "\nDirstate: %s %s" % (hg.hex(pp[0]), hg.hex(pp[1]))
    msg += "\n\nPatch Data:\n"
    text = msg + "\n".join([str(x) for x in self.applied]) + '\n' + (ar and
               "\n".join(ar) + '\n' or "")
    n = repo.commit(None, text, user=None, force=1)
    if not n:
        self.ui.warn("repo commit failed\n")
        return 1
    self.applied.append(statusentry(revlog.hex(n),'.hg.patches.save.line'))
    self.applied_dirty = 1
    self.removeundo(repo)
1254
1254
def full_series_end(self):
    """Return the full_series index just past the last applied patch.

    0 when nothing is applied; len(full_series) when the last applied
    patch cannot be located in the series file.
    """
    if not self.applied:
        return 0
    last = self.applied[-1].name
    pos = self.find_series(last)
    if pos is None:
        return len(self.full_series)
    return pos + 1
1263
1263
def series_end(self, all_patches=False):
    """If all_patches is False, return the index of the next pushable patch
    in the series, or the series length. If all_patches is True, return the
    index of the first patch past the last applied one.
    """
    def first_from(start):
        # scan forward for the first pushable patch, unless the caller
        # asked for the raw position
        if all_patches:
            return start
        i = start
        while i < len(self.series):
            ok, reason = self.pushable(i)
            if ok:
                break
            self.explain_pushable(i)
            i += 1
        return i

    if self.applied:
        last = self.applied[-1].name
        try:
            pos = self.series.index(last)
        except ValueError:
            # last applied patch no longer in the series file
            return 0
        return first_from(pos + 1)
    return first_from(0)
1289
1289
def appliedname(self, index):
    """Printable name of the index-th applied patch.

    In verbose mode the name is prefixed with its series position.
    """
    name = self.applied[index].name
    if not self.ui.verbose:
        return name
    return "%d %s" % (self.series.index(name), name)
1297
1297
def qimport(self, repo, files, patchname=None, rev=None, existing=None,
            force=None, git=False):
    """Add patch files and/or existing revisions to the queue.

    files lists patch files to import ('-' reads stdin); rev converts
    existing changesets into managed patches.  existing registers a
    file already present in the patch directory, force overwrites an
    existing patch of the same name, and git selects git-style diffs
    for --rev imports.  Raises util.Abort on any conflict.
    """
    def checkseries(patchname):
        if patchname in self.series:
            raise util.Abort(_('patch %s is already in the series file')
                             % patchname)
    def checkfile(patchname):
        if not force and os.path.exists(self.join(patchname)):
            raise util.Abort(_('patch "%s" already exists')
                             % patchname)

    if rev:
        if files:
            raise util.Abort(_('option "-r" not valid when importing '
                               'files'))
        # process revisions from the highest down to the lowest
        rev = cmdutil.revrange(repo, rev)
        rev.sort(lambda x, y: cmp(y, x))
    if (len(files) > 1 or len(rev) > 1) and patchname:
        raise util.Abort(_('option "-n" not valid when importing multiple '
                           'patches'))
    i = 0
    added = []
    if rev:
        # If mq patches are applied, we can only import revisions
        # that form a linear path to qbase.
        # Otherwise, they should form a linear path to a head.
        heads = repo.changelog.heads(repo.changelog.node(rev[-1]))
        if len(heads) > 1:
            raise util.Abort(_('revision %d is the root of more than one '
                               'branch') % rev[-1])
        if self.applied:
            base = revlog.hex(repo.changelog.node(rev[0]))
            if base in [n.rev for n in self.applied]:
                raise util.Abort(_('revision %d is already managed')
                                 % rev[0])
            if heads != [revlog.bin(self.applied[-1].rev)]:
                raise util.Abort(_('revision %d is not the parent of '
                                   'the queue') % rev[0])
            base = repo.changelog.rev(revlog.bin(self.applied[0].rev))
            lastparent = repo.changelog.parentrevs(base)[0]
        else:
            if heads != [repo.changelog.node(rev[0])]:
                raise util.Abort(_('revision %d has unmanaged children')
                                 % rev[0])
            lastparent = None

        if git:
            self.diffopts().git = True

        for r in rev:
            p1, p2 = repo.changelog.parentrevs(r)
            n = repo.changelog.node(r)
            if p2 != revlog.nullrev:
                raise util.Abort(_('cannot import merge revision %d') % r)
            if lastparent and lastparent != r:
                raise util.Abort(_('revision %d is not the parent of %d')
                                 % (r, lastparent))
            lastparent = p1

            if not patchname:
                patchname = normname('%d.diff' % r)
            checkseries(patchname)
            checkfile(patchname)
            self.full_series.insert(0, patchname)

            patchf = self.opener(patchname, "w")
            patch.export(repo, [n], fp=patchf, opts=self.diffopts())
            patchf.close()

            se = statusentry(revlog.hex(n), patchname)
            self.applied.insert(0, se)

            added.append(patchname)
            patchname = None
        self.parse_series()
        self.applied_dirty = 1

    for filename in files:
        if existing:
            if filename == '-':
                raise util.Abort(_('-e is incompatible with import from -'))
            if not patchname:
                patchname = normname(filename)
            if not os.path.isfile(self.join(patchname)):
                raise util.Abort(_("patch %s does not exist") % patchname)
        else:
            try:
                if filename == '-':
                    if not patchname:
                        raise util.Abort(_('need --name to import a patch from -'))
                    text = sys.stdin.read()
                else:
                    text = file(filename).read()
            except IOError:
                # report the file we failed to read; patchname may
                # still be None at this point (it is only derived from
                # filename after a successful read)
                raise util.Abort(_("unable to read %s") % filename)
            if not patchname:
                patchname = normname(os.path.basename(filename))
            checkfile(patchname)
            patchf = self.opener(patchname, "w")
            patchf.write(text)
        checkseries(patchname)
        index = self.full_series_end() + i
        self.full_series[index:index] = [patchname]
        self.parse_series()
        self.ui.warn("adding %s to series file\n" % patchname)
        i += 1
        added.append(patchname)
        patchname = None
    self.series_dirty = 1
    qrepo = self.qrepo()
    if qrepo:
        qrepo.add(added)
1410
1410
def delete(ui, repo, *patches, **opts):
    """remove patches from queue

    The patches must not be applied, unless they are arguments to
    the --rev parameter. At least one patch or revision is required.

    With --rev, mq will stop managing the named revisions (converting
    them to regular mercurial changesets). The patches must be applied
    and at the base of the stack. This option is useful when the patches
    have been applied upstream.

    With --keep, the patch files are preserved in the patch directory."""
    # delegate to the queue object, then persist series/status changes
    mq = repo.mq
    mq.delete(repo, patches, opts)
    mq.save_dirty()
    return 0
1427
1427
def applied(ui, repo, patch=None, **opts):
    """print the patches already applied"""
    q = repo.mq
    if not patch:
        end = q.series_end(True)
    else:
        if patch not in q.series:
            raise util.Abort(_("patch %s is not in series file") % patch)
        end = q.series.index(patch) + 1
    # list everything up to 'end' that is in applied state
    return q.qseries(repo, length=end, status='A', summary=opts.get('summary'))
1438
1438
def unapplied(ui, repo, patch=None, **opts):
    """print the patches not yet applied"""
    q = repo.mq
    if not patch:
        start = q.series_end(True)
    else:
        if patch not in q.series:
            raise util.Abort(_("patch %s is not in series file") % patch)
        start = q.series.index(patch) + 1
    # list everything from 'start' onwards that is unapplied
    q.qseries(repo, start=start, status='U', summary=opts.get('summary'))
1449
1449
def qimport(ui, repo, *filename, **opts):
    """import a patch

    The patch will have the same name as its source file unless you
    give it a new one with --name.

    You can register an existing patch inside the patch directory
    with the --existing flag.

    With --force, an existing patch of the same name will be overwritten.

    An existing changeset may be placed under mq control with --rev
    (e.g. qimport --rev tip -n patch will place tip under mq control).
    With --git, patches imported with --rev will use the git diff
    format.
    """
    # thin wrapper: forward the command options to the queue object
    mq = repo.mq
    mq.qimport(repo, filename, patchname=opts['name'],
               existing=opts['existing'], force=opts['force'],
               rev=opts['rev'], git=opts['git'])
    mq.save_dirty()
    return 0
1472
1472
def init(ui, repo, **opts):
    """init a new queue repository

    The queue repository is unversioned by default. If -c is
    specified, qinit will create a separate nested repository
    for patches (qinit -c may also be run later to convert
    an unversioned patch repository into a versioned one).
    You can use qcommit to commit changes to this queue repository."""
    q = repo.mq
    r = q.init(repo, create=opts['create_repo'])
    q.save_dirty()
    if r:
        # seed the fresh queue repository with an ignore file for the
        # mq control files and an empty series file
        if not os.path.exists(r.wjoin('.hgignore')):
            fp = r.wopener('.hgignore', 'w')
            fp.write('syntax: glob\n')
            fp.write('status\n')
            fp.write('guards\n')
            fp.close()
        if not os.path.exists(r.wjoin('series')):
            r.wopener('series', 'w').close()
        r.add(['.hgignore', 'series'])
        commands.add(ui, r)
    return 0
1496
1496
def clone(ui, source, dest=None, **opts):
    '''clone main and patch repository at same time

    If source is local, destination will have no patches applied. If
    source is remote, this command can not check if patches are
    applied in source, so cannot guarantee that patches are not
    applied in destination. If you clone remote repository, be sure
    before that it has no patches applied.

    Source patch repository is looked for in <src>/.hg/patches by
    default. Use -p <url> to change.

    The patch directory must be a nested mercurial repository, as
    would be created by qinit -c.
    '''
    cmdutil.setremoteconfig(ui, opts)
    if dest is None:
        dest = hg.defaultdest(source)
    sr = hg.repository(ui, ui.expandpath(source))
    patchdir = opts['patches'] or (sr.url() + '/.hg/patches')
    try:
        # only to verify that the patch repository exists
        hg.repository(ui, patchdir)
    except hg.RepoError:
        raise util.Abort(_('versioned patch repository not found'
                           ' (see qinit -c)'))
    qbase, destrev = None, None
    if sr.local():
        if sr.mq.applied:
            qbase = revlog.bin(sr.mq.applied[0].rev)
            if not hg.islocal(dest):
                # for a remote destination, pull only heads below qbase
                heads = dict.fromkeys(sr.heads())
                for h in sr.heads(qbase):
                    del heads[h]
                destrev = heads.keys()
                destrev.append(sr.changelog.parents(qbase)[0])
    ui.note(_('cloning main repo\n'))
    sr, dr = hg.clone(ui, sr.url(), dest,
                      pull=opts['pull'],
                      rev=destrev,
                      update=False,
                      stream=opts['uncompressed'])
    ui.note(_('cloning patch repo\n'))
    spr, dpr = hg.clone(ui, opts['patches'] or (sr.url() + '/.hg/patches'),
                        dr.url() + '/.hg/patches',
                        pull=opts['pull'],
                        update=not opts['noupdate'],
                        stream=opts['uncompressed'])
    if dr.local():
        if qbase:
            ui.note(_('stripping applied patches from destination repo\n'))
            dr.mq.strip(dr, qbase, update=False, backup=None)
        if not opts['noupdate']:
            ui.note(_('updating destination repo\n'))
            hg.update(dr, dr.changelog.tip())
1551
1551
def commit(ui, repo, *pats, **opts):
    """commit changes in the queue repository"""
    r = repo.mq.qrepo()
    if not r:
        raise util.Abort('no queue repository')
    commands.commit(r.ui, r, *pats, **opts)
1558
1558
def series(ui, repo, **opts):
    """print the entire series file"""
    q = repo.mq
    q.qseries(repo, missing=opts['missing'], summary=opts['summary'])
    return 0
1563
1563
def top(ui, repo, **opts):
    """print the name of the current patch"""
    q = repo.mq
    t = q.applied and q.series_end(True) or 0
    if not t:
        ui.write("No patches applied\n")
        return 1
    return q.qseries(repo, start=t-1, length=1, status='A',
                     summary=opts.get('summary'))
1574
1574
def next(ui, repo, **opts):
    """print the name of the next patch"""
    q = repo.mq
    pos = q.series_end()
    # series_end() == len(series) means every patch is already applied.
    if pos != len(q.series):
        return q.qseries(repo, start=pos, length=1,
                         summary=opts.get('summary'))
    ui.write("All patches applied\n")
    return 1
1583
1583
def prev(ui, repo, **opts):
    """print the name of the previous patch"""
    q = repo.mq
    applied = len(q.applied)
    if not applied:
        ui.write("No patches applied\n")
        return 1
    if applied == 1:
        ui.write("Only one patch applied\n")
        return 1
    # The previous patch sits two entries below the stack top.
    return q.qseries(repo, start=applied - 2, length=1, status='A',
                     summary=opts.get('summary'))
1596
1596
def new(ui, repo, patch, *args, **opts):
    """create a new patch

    qnew creates a new patch on top of the currently-applied patch
    (if any). It will refuse to run if there are any outstanding
    changes unless -f is specified, in which case the patch will
    be initialised with them. You may also use -I, -X, and/or a list of
    files after the patch name to add only changes to matching files
    to the new patch, leaving the rest as uncommitted modifications.

    -e, -m or -l set the patch header as well as the commit message.
    If none is specified, the patch header is empty and the
    commit message is '[mq]: PATCH'"""
    q = repo.mq
    # Assemble the message from -m/-l, let -e open an editor on it, and
    # pass the result down to queue.new() through opts['msg'].
    msg = cmdutil.logmessage(opts)
    if opts['edit']:
        msg = ui.edit(msg, ui.username())
    opts['msg'] = msg
    q.new(repo, patch, *args, **opts)
    q.save_dirty()
    return 0
1618
1618
def refresh(ui, repo, *pats, **opts):
    """update the current patch

    If any file patterns are provided, the refreshed patch will contain only
    the modifications that match those patterns; the remaining modifications
    will remain in the working directory.

    hg add/remove/copy/rename work as usual, though you might want to use
    git-style patches (--git or [diff] git=1) to track copies and renames.
    """
    q = repo.mq
    msg = cmdutil.logmessage(opts)
    if opts['edit']:
        if msg:
            raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
        # Seed the editor with the top patch's existing header, keeping
        # its recorded author when there is one.
        toppatch = q.applied[-1].name
        headermsg, comment, user, date, hasdiff = q.readheaders(toppatch)
        msg = ui.edit('\n'.join(headermsg), user or ui.username())
    ret = q.refresh(repo, pats, msg=msg, **opts)
    q.save_dirty()
    return ret
1640
1640
def diff(ui, repo, *pats, **opts):
    """diff of the current patch"""
    # All of the work happens inside the queue object.
    q = repo.mq
    q.diff(repo, pats, opts)
    return 0
1645
1645
def fold(ui, repo, *files, **opts):
    """fold the named patches into the current patch

    Patches must not yet be applied. Each patch will be successively
    applied to the current patch in the order given. If all the
    patches apply successfully, the current patch will be refreshed
    with the new cumulative patch, and the folded patches will
    be deleted. With -k/--keep, the folded patch files will not
    be removed afterwards.

    The header for each folded patch will be concatenated with
    the current patch header, separated by a line of '* * *'."""

    q = repo.mq

    if not files:
        raise util.Abort(_('qfold requires at least one patch name'))
    if not q.check_toppatch(repo):
        raise util.Abort(_('No patches applied'))

    message = cmdutil.logmessage(opts)
    if opts['edit']:
        if message:
            # -e opens an editor for the message; it cannot be combined
            # with an explicit -m/-l message.
            raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))

    # Resolve every argument to a canonical patch name up front so we
    # abort before touching the working directory.
    parent = q.lookup('qtip')
    patches = []
    messages = []
    for f in files:
        p = q.lookup(f)
        if p in patches or p == parent:
            # NOTE(review): this warns but does not skip -- there is no
            # `continue`, so the duplicate is still appended below; the
            # warning also lacks a trailing newline.  Confirm intent.
            ui.warn(_('Skipping already folded patch %s') % p)
        if q.isapplied(p):
            raise util.Abort(_('qfold cannot fold already applied patch %s') % p)
        patches.append(p)

    # Apply each patch to the working directory, in the order given.
    for p in patches:
        if not message:
            # No explicit message: collect each folded patch's header so
            # they can be concatenated into the refreshed header below.
            messages.append(q.readheaders(p)[0])
        pf = q.join(p)
        # NOTE: `files` (the *files argument) is rebound here to the list
        # of files touched by this patch.
        (patchsuccess, files, fuzz) = q.patch(repo, pf)
        if not patchsuccess:
            raise util.Abort(_('Error folding patch %s') % p)
        patch.updatedir(ui, repo, files)

    if not message:
        # Start from the current (qtip) header and append each folded
        # header, separated by a '* * *' line.
        message, comments, user = q.readheaders(parent)[0:3]
        for msg in messages:
            message.append('* * *')
            message.extend(msg)
        message = '\n'.join(message)

    if opts['edit']:
        # `user` is defined above because -e with a preset message aborts.
        message = ui.edit(message, user or ui.username())

    # Fold the accumulated changes into the top patch, then drop the
    # now-merged source patches (kept on disk with -k/--keep).
    q.refresh(repo, msg=message)
    q.delete(repo, patches, opts)
    q.save_dirty()
1704
1704
def goto(ui, repo, patch, **opts):
    '''push or pop patches until named patch is at top of stack'''
    q = repo.mq
    target = q.lookup(patch)
    force = opts['force']
    # Pop if the target is already applied (i.e. below or at the top),
    # otherwise push up to it.
    if q.isapplied(target):
        ret = q.pop(repo, target, force=force)
    else:
        ret = q.push(repo, target, force=force)
    q.save_dirty()
    return ret
1715
1715
def guard(ui, repo, *args, **opts):
    '''set or print guards for a patch

    Guards control whether a patch can be pushed. A patch with no
    guards is always pushed. A patch with a positive guard ("+foo") is
    pushed only if the qselect command has activated it. A patch with
    a negative guard ("-foo") is never pushed if the qselect command
    has activated it.

    With no arguments, print the currently active guards.
    With arguments, set guards for the named patch.

    To set a negative guard "-foo" on topmost patch ("--" is needed so
    hg will not interpret "-foo" as an option):
        hg qguard -- -foo

    To set guards on another patch:
        hg qguard other.patch +2.6.17 -stable
    '''
    def status(idx):
        # Print "<patchname>: <guards>" for series entry idx; an entry
        # with no guards displays as "unguarded".  (Closure over `q`,
        # which is bound just below.)
        guards = q.series_guards[idx] or ['unguarded']
        ui.write('%s: %s\n' % (q.series[idx], ' '.join(guards)))
    q = repo.mq
    patch = None
    args = list(args)
    if opts['list']:
        # -l/--list: show the guards of every patch in the series.
        if args or opts['none']:
            raise util.Abort(_('cannot mix -l/--list with options or arguments'))
        for i in xrange(len(q.series)):
            status(i)
        return
    if not args or args[0][0:1] in '-+':
        # No explicit patch name (first arg already looks like a +/-
        # guard): default to the topmost applied patch.
        if not q.applied:
            raise util.Abort(_('no patches applied'))
        patch = q.applied[-1].name
    if patch is None and args[0][0:1] not in '-+':
        # First argument names the patch; the rest are guards.
        patch = args.pop(0)
    if patch is None:
        raise util.Abort(_('no patch to work with'))
    if args or opts['none']:
        # Setting mode: remaining args become the new guard list
        # (-n/--none clears it).
        idx = q.find_series(patch)
        if idx is None:
            raise util.Abort(_('no patch named %s') % patch)
        q.set_guards(idx, args)
        q.save_dirty()
    else:
        # Query mode: just report this patch's current guards.
        status(q.series.index(q.lookup(patch)))
1763
1763
def header(ui, repo, patch=None):
    """Print the header of the topmost or specified patch"""
    q = repo.mq

    if not patch:
        # No name given: use the current top patch, if any.
        if not q.applied:
            ui.write('No patches applied\n')
            return 1
        patch = q.lookup('qtip')
    else:
        patch = q.lookup(patch)
    # readheaders()[0] is the list of message lines.
    message = q.readheaders(patch)[0]
    ui.write('\n'.join(message) + '\n')
1778
1778
def lastsavename(path):
    """Find the most recent saved queue next to *path*.

    Saved queues are siblings of path named "<base>.<N>" (see savename).
    Returns a (fullpath, N) tuple for the highest N found, or
    (None, None) when no save exists.
    """
    (directory, base) = os.path.split(path)
    names = os.listdir(directory)
    # Escape the base name and require a literal dot so that e.g.
    # "patchesX1" is not mistaken for a save of "patches" (the original
    # pattern's bare "." matched any character).
    namere = re.compile("%s\\.([0-9]+)" % re.escape(base))
    maxindex = None
    maxname = None
    for f in names:
        m = namere.match(f)
        if m:
            index = int(m.group(1))
            # Track the numerically (not lexically) largest save index.
            if maxindex is None or index > maxindex:
                maxindex = index
                maxname = f
    if maxname:
        return (os.path.join(directory, maxname), maxindex)
    return (None, None)
1795
1795
def savename(path):
    """Return the next unused save name: path.<last index + 1>."""
    last, index = lastsavename(path)
    if last is None:
        # No previous save: start numbering at 1.
        index = 0
    return "%s.%d" % (path, index + 1)
1802
1802
def push(ui, repo, patch=None, **opts):
    """push the next patch onto the stack"""
    q = repo.mq
    mergeq = None

    if opts['all']:
        # -a/--all: push everything, i.e. up to the last series entry.
        if not q.series:
            ui.warn(_('no patches in series\n'))
            return 0
        patch = q.series[-1]
    if opts['merge']:
        # -m/--merge: merge with the queue named by -n, or with the most
        # recently saved one.
        newpath = opts['name']
        if not newpath:
            newpath, index = lastsavename(q.path)
        if not newpath:
            ui.warn("no saved queues found, please use -n\n")
            return 1
        mergeq = queue(ui, repo.join(""), newpath)
        ui.warn("merging with queue at: %s\n" % mergeq.path)
    return q.push(repo, patch, force=opts['force'], list=opts['list'],
                  mergeq=mergeq)
1826
1826
def pop(ui, repo, patch=None, **opts):
    """pop the current patch off the stack"""
    if opts['name']:
        # -n: operate on a named (saved) queue; the working directory is
        # left alone in that case.
        q = queue(ui, repo.join(""), repo.join(opts['name']))
        ui.warn('using patch queue: %s\n' % q.path)
        localupdate = False
    else:
        q = repo.mq
        localupdate = True
    ret = q.pop(repo, patch, force=opts['force'], update=localupdate,
                all=opts['all'])
    q.save_dirty()
    return ret
1840
1840
def rename(ui, repo, patch, name=None, **opts):
    """rename a patch

    With one argument, renames the current patch to PATCH1.
    With two arguments, renames PATCH1 to PATCH2."""

    q = repo.mq

    if not name:
        # Single-argument form: the sole argument is the new name and the
        # patch to rename is the current (topmost) one.
        name = patch
        patch = None

    if patch:
        patch = q.lookup(patch)
    else:
        if not q.applied:
            ui.write(_('No patches applied\n'))
            return
        patch = q.lookup('qtip')
    absdest = q.join(name)
    if os.path.isdir(absdest):
        # Renaming into an existing directory keeps the patch's basename.
        name = normname(os.path.join(name, os.path.basename(patch)))
        absdest = q.join(name)
    if os.path.exists(absdest):
        raise util.Abort(_('%s already exists') % absdest)

    if name in q.series:
        raise util.Abort(_('A patch named %s already exists in the series file') % name)

    if ui.verbose:
        ui.write('Renaming %s to %s\n' % (patch, name))
    # Rewrite the series entry under the new name, preserving any guards
    # ("#+foo"/"#-foo" suffixes) attached to the old entry.
    i = q.find_series(patch)
    guards = q.guard_re.findall(q.full_series[i])
    q.full_series[i] = name + ''.join([' #' + g for g in guards])
    q.parse_series()
    q.series_dirty = 1

    # If the patch is currently applied, point its status entry at the
    # new name as well.
    info = q.isapplied(patch)
    if info:
        q.applied[info[0]] = statusentry(info[1], name)
        q.applied_dirty = 1

    util.rename(q.join(patch), absdest)
    r = q.qrepo()
    if r:
        # The patch directory may itself be a repository: record the
        # rename there as copy + remove so file history is preserved.
        wlock = r.wlock()
        try:
            if r.dirstate[name] == 'r':
                # Destination was previously deleted; resurrect it first.
                r.undelete([name])
            r.copy(patch, name)
            r.remove([patch], False)
        finally:
            # Dropping the last reference releases the working-dir lock.
            del wlock

    q.save_dirty()
1896
1896
def restore(ui, repo, rev, **opts):
    """restore the queue state saved by a rev"""
    q = repo.mq
    q.restore(repo, repo.lookup(rev), delete=opts['delete'],
              qupdate=opts['update'])
    q.save_dirty()
    return 0
1905
1905
def save(ui, repo, **opts):
    """save current queue state"""
    q = repo.mq
    message = cmdutil.logmessage(opts)
    ret = q.save(repo, msg=message)
    if ret:
        # Propagate a failed save without touching anything else.
        return ret
    q.save_dirty()
    if opts['copy']:
        # -c/--copy: duplicate the whole patch directory, either to the
        # explicit -n NAME or to the next automatic save name.
        path = q.path
        if opts['name']:
            newpath = os.path.join(q.basepath, opts['name'])
            if os.path.exists(newpath):
                if not os.path.isdir(newpath):
                    raise util.Abort(_('destination %s exists and is not '
                                       'a directory') % newpath)
                if not opts['force']:
                    raise util.Abort(_('destination %s exists, '
                                       'use -f to force') % newpath)
        else:
            newpath = savename(path)
        ui.warn("copy %s to %s\n" % (path, newpath))
        util.copyfiles(path, newpath)
    if opts['empty']:
        try:
            os.unlink(q.join(q.status_path))
        except OSError:
            # Best-effort removal: the status file may already be gone.
            # (Was a bare except, which also swallowed KeyboardInterrupt.)
            pass
    return 0
1935
1935
def strip(ui, repo, rev, **opts):
    """strip a revision and all later revs on the same branch"""
    # Backup policy: everything by default, only the stripped changesets
    # with -b/--backup, nothing with -n/--nobackup.
    if opts['backup']:
        backup = 'strip'
    elif opts['nobackup']:
        backup = 'none'
    else:
        backup = 'all'
    # Only update the working directory if it is not already at null.
    update = repo.dirstate.parents()[0] != revlog.nullid
    repo.mq.strip(repo, repo.lookup(rev), backup=backup, update=update)
    return 0
1947
1947
def select(ui, repo, *args, **opts):
    '''set or print guarded patches to push

    Use the qguard command to set or print guards on patch, then use
    qselect to tell mq which guards to use. A patch will be pushed if it
    has no guards or any positive guards match the currently selected guard,
    but will not be pushed if any negative guards match the current guard.
    For example:

        qguard foo.patch -stable    (negative guard)
        qguard bar.patch +stable    (positive guard)
        qselect stable

    This activates the "stable" guard. mq will skip foo.patch (because
    it has a negative match) but push bar.patch (because it
    has a positive match).

    With no arguments, prints the currently active guards.
    With one argument, sets the active guard.

    Use -n/--none to deactivate guards (no other arguments needed).
    When no guards are active, patches with positive guards are skipped
    and patches with negative guards are pushed.

    qselect can change the guards on applied patches. It does not pop
    guarded patches by default. Use --pop to pop back to the last applied
    patch that is not guarded. Use --reapply (which implies --pop) to push
    back to the current patch afterwards, but skip guarded patches.

    Use -s/--series to print a list of all guards in the series file (no
    other arguments needed). Use -v for more information.'''

    q = repo.mq
    guards = q.active()
    if args or opts['none']:
        # Changing the active guard set.  Snapshot how many patches were
        # unapplied/guarded beforehand so we can report what changed.
        old_unapplied = q.unapplied(repo)
        old_guarded = [i for i in xrange(len(q.applied)) if
                       not q.pushable(i)[0]]
        q.set_active(args)
        q.save_dirty()
        if not args:
            ui.status(_('guards deactivated\n'))
        if not opts['pop'] and not opts['reapply']:
            # Not popping: just tell the user how the pushable set moved.
            unapplied = q.unapplied(repo)
            guarded = [i for i in xrange(len(q.applied))
                       if not q.pushable(i)[0]]
            if len(unapplied) != len(old_unapplied):
                ui.status(_('number of unguarded, unapplied patches has '
                            'changed from %d to %d\n') %
                          (len(old_unapplied), len(unapplied)))
            if len(guarded) != len(old_guarded):
                ui.status(_('number of guarded, applied patches has changed '
                            'from %d to %d\n') %
                          (len(old_guarded), len(guarded)))
    elif opts['series']:
        # -s/--series: summarize guard usage across the series file.
        # `guards` is rebound here to a guard -> use-count mapping.
        guards = {}
        noguards = 0
        for gs in q.series_guards:
            if not gs:
                noguards += 1
            for g in gs:
                guards.setdefault(g, 0)
                guards[g] += 1
        if ui.verbose:
            guards['NONE'] = noguards
        guards = guards.items()
        # Sort by guard name, ignoring the leading +/- sign.
        guards.sort(lambda a, b: cmp(a[0][1:], b[0][1:]))
        if guards:
            ui.note(_('guards in series file:\n'))
            for guard, count in guards:
                # Counts only appear in verbose mode (ui.note).
                ui.note('%2d ' % count)
                ui.write(guard, '\n')
        else:
            ui.note(_('no guards in series file\n'))
    else:
        # No arguments: report the currently active guards.
        if guards:
            ui.note(_('active guards:\n'))
            for g in guards:
                ui.write(g, '\n')
        else:
            ui.write(_('no active guards\n'))
    # --reapply remembers the current top so it can be re-pushed after
    # the pop below (guarded patches will be skipped on the way back up).
    reapply = opts['reapply'] and q.applied and q.appliedname(-1)
    popped = False
    if opts['pop'] or opts['reapply']:
        # Pop down to just below the first guarded applied patch.
        for i in xrange(len(q.applied)):
            pushable, reason = q.pushable(i)
            if not pushable:
                ui.status(_('popping guarded patches\n'))
                popped = True
                if i == 0:
                    q.pop(repo, all=True)
                else:
                    q.pop(repo, i-1)
                break
    if popped:
        try:
            if reapply:
                ui.status(_('reapplying unguarded patches\n'))
                q.push(repo, reapply)
        finally:
            # Persist queue state even if the re-push fails midway.
            q.save_dirty()
2049
2049
2050 def reposetup(ui, repo):
2050 def reposetup(ui, repo):
2051 class mqrepo(repo.__class__):
2051 class mqrepo(repo.__class__):
2052 def abort_if_wdir_patched(self, errmsg, force=False):
2052 def abort_if_wdir_patched(self, errmsg, force=False):
2053 if self.mq.applied and not force:
2053 if self.mq.applied and not force:
2054 parent = revlog.hex(self.dirstate.parents()[0])
2054 parent = revlog.hex(self.dirstate.parents()[0])
2055 if parent in [s.rev for s in self.mq.applied]:
2055 if parent in [s.rev for s in self.mq.applied]:
2056 raise util.Abort(errmsg)
2056 raise util.Abort(errmsg)
2057
2057
2058 def commit(self, *args, **opts):
2058 def commit(self, *args, **opts):
2059 if len(args) >= 6:
2059 if len(args) >= 6:
2060 force = args[5]
2060 force = args[5]
2061 else:
2061 else:
2062 force = opts.get('force')
2062 force = opts.get('force')
2063 self.abort_if_wdir_patched(
2063 self.abort_if_wdir_patched(
2064 _('cannot commit over an applied mq patch'),
2064 _('cannot commit over an applied mq patch'),
2065 force)
2065 force)
2066
2066
2067 return super(mqrepo, self).commit(*args, **opts)
2067 return super(mqrepo, self).commit(*args, **opts)
2068
2068
2069 def push(self, remote, force=False, revs=None):
2069 def push(self, remote, force=False, revs=None):
2070 if self.mq.applied and not force and not revs:
2070 if self.mq.applied and not force and not revs:
2071 raise util.Abort(_('source has mq patches applied'))
2071 raise util.Abort(_('source has mq patches applied'))
2072 return super(mqrepo, self).push(remote, force, revs)
2072 return super(mqrepo, self).push(remote, force, revs)
2073
2073
2074 def tags(self):
2074 def tags(self):
2075 if self.tagscache:
2075 if self.tagscache:
2076 return self.tagscache
2076 return self.tagscache
2077
2077
2078 tagscache = super(mqrepo, self).tags()
2078 tagscache = super(mqrepo, self).tags()
2079
2079
2080 q = self.mq
2080 q = self.mq
2081 if not q.applied:
2081 if not q.applied:
2082 return tagscache
2082 return tagscache
2083
2083
2084 mqtags = [(revlog.bin(patch.rev), patch.name) for patch in q.applied]
2084 mqtags = [(revlog.bin(patch.rev), patch.name) for patch in q.applied]
2085 mqtags.append((mqtags[-1][0], 'qtip'))
2085 mqtags.append((mqtags[-1][0], 'qtip'))
2086 mqtags.append((mqtags[0][0], 'qbase'))
2086 mqtags.append((mqtags[0][0], 'qbase'))
2087 mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent'))
2087 mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent'))
2088 for patch in mqtags:
2088 for patch in mqtags:
2089 if patch[1] in tagscache:
2089 if patch[1] in tagscache:
2090 self.ui.warn('Tag %s overrides mq patch of the same name\n' % patch[1])
2090 self.ui.warn('Tag %s overrides mq patch of the same name\n' % patch[1])
2091 else:
2091 else:
2092 tagscache[patch[1]] = patch[0]
2092 tagscache[patch[1]] = patch[0]
2093
2093
2094 return tagscache
2094 return tagscache
2095
2095
2096 def _branchtags(self):
2096 def _branchtags(self):
2097 q = self.mq
2097 q = self.mq
2098 if not q.applied:
2098 if not q.applied:
2099 return super(mqrepo, self)._branchtags()
2099 return super(mqrepo, self)._branchtags()
2100
2100
2101 self.branchcache = {} # avoid recursion in changectx
2101 self.branchcache = {} # avoid recursion in changectx
2102 cl = self.changelog
2102 cl = self.changelog
2103 partial, last, lrev = self._readbranchcache()
2103 partial, last, lrev = self._readbranchcache()
2104
2104
2105 qbase = cl.rev(revlog.bin(q.applied[0].rev))
2105 qbase = cl.rev(revlog.bin(q.applied[0].rev))
2106 start = lrev + 1
2106 start = lrev + 1
2107 if start < qbase:
2107 if start < qbase:
2108 # update the cache (excluding the patches) and save it
2108 # update the cache (excluding the patches) and save it
2109 self._updatebranchcache(partial, lrev+1, qbase)
2109 self._updatebranchcache(partial, lrev+1, qbase)
2110 self._writebranchcache(partial, cl.node(qbase-1), qbase-1)
2110 self._writebranchcache(partial, cl.node(qbase-1), qbase-1)
2111 start = qbase
2111 start = qbase
2112 # if start = qbase, the cache is as updated as it should be.
2112 # if start = qbase, the cache is as updated as it should be.
2113 # if start > qbase, the cache includes (part of) the patches.
2113 # if start > qbase, the cache includes (part of) the patches.
2114 # we might as well use it, but we won't save it.
2114 # we might as well use it, but we won't save it.
2115
2115
2116 # update the cache up to the tip
2116 # update the cache up to the tip
2117 self._updatebranchcache(partial, start, cl.count())
2117 self._updatebranchcache(partial, start, cl.count())
2118
2118
2119 return partial
2119 return partial
2120
2120
2121 if repo.local():
2121 if repo.local():
2122 repo.__class__ = mqrepo
2122 repo.__class__ = mqrepo
2123 repo.mq = queue(ui, repo.join(""))
2123 repo.mq = queue(ui, repo.join(""))
2124
2124
2125 seriesopts = [('s', 'summary', None, _('print first line of patch header'))]
2125 seriesopts = [('s', 'summary', None, _('print first line of patch header'))]
2126
2126
2127 cmdtable = {
2127 cmdtable = {
2128 "qapplied": (applied, [] + seriesopts, _('hg qapplied [-s] [PATCH]')),
2128 "qapplied": (applied, [] + seriesopts, _('hg qapplied [-s] [PATCH]')),
2129 "qclone":
2129 "qclone":
2130 (clone,
2130 (clone,
2131 [('', 'pull', None, _('use pull protocol to copy metadata')),
2131 [('', 'pull', None, _('use pull protocol to copy metadata')),
2132 ('U', 'noupdate', None, _('do not update the new working directories')),
2132 ('U', 'noupdate', None, _('do not update the new working directories')),
2133 ('', 'uncompressed', None,
2133 ('', 'uncompressed', None,
2134 _('use uncompressed transfer (fast over LAN)')),
2134 _('use uncompressed transfer (fast over LAN)')),
2135 ('p', 'patches', '', _('location of source patch repo')),
2135 ('p', 'patches', '', _('location of source patch repo')),
2136 ] + commands.remoteopts,
2136 ] + commands.remoteopts,
2137 _('hg qclone [OPTION]... SOURCE [DEST]')),
2137 _('hg qclone [OPTION]... SOURCE [DEST]')),
2138 "qcommit|qci":
2138 "qcommit|qci":
2139 (commit,
2139 (commit,
2140 commands.table["^commit|ci"][1],
2140 commands.table["^commit|ci"][1],
2141 _('hg qcommit [OPTION]... [FILE]...')),
2141 _('hg qcommit [OPTION]... [FILE]...')),
2142 "^qdiff":
2142 "^qdiff":
2143 (diff,
2143 (diff,
2144 [('g', 'git', None, _('use git extended diff format')),
2144 [('g', 'git', None, _('use git extended diff format')),
2145 ] + commands.walkopts,
2145 ] + commands.walkopts,
2146 _('hg qdiff [-I] [-X] [-g] [FILE]...')),
2146 _('hg qdiff [-I] [-X] [-g] [FILE]...')),
2147 "qdelete|qremove|qrm":
2147 "qdelete|qremove|qrm":
2148 (delete,
2148 (delete,
2149 [('k', 'keep', None, _('keep patch file')),
2149 [('k', 'keep', None, _('keep patch file')),
2150 ('r', 'rev', [], _('stop managing a revision'))],
2150 ('r', 'rev', [], _('stop managing a revision'))],
2151 _('hg qdelete [-k] [-r REV]... [PATCH]...')),
2151 _('hg qdelete [-k] [-r REV]... [PATCH]...')),
2152 'qfold':
2152 'qfold':
2153 (fold,
2153 (fold,
2154 [('e', 'edit', None, _('edit patch header')),
2154 [('e', 'edit', None, _('edit patch header')),
2155 ('k', 'keep', None, _('keep folded patch files')),
2155 ('k', 'keep', None, _('keep folded patch files')),
2156 ] + commands.commitopts,
2156 ] + commands.commitopts,
2157 _('hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...')),
2157 _('hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...')),
2158 'qgoto':
2158 'qgoto':
2159 (goto,
2159 (goto,
2160 [('f', 'force', None, _('overwrite any local changes'))],
2160 [('f', 'force', None, _('overwrite any local changes'))],
2161 _('hg qgoto [OPTION]... PATCH')),
2161 _('hg qgoto [OPTION]... PATCH')),
2162 'qguard':
2162 'qguard':
2163 (guard,
2163 (guard,
2164 [('l', 'list', None, _('list all patches and guards')),
2164 [('l', 'list', None, _('list all patches and guards')),
2165 ('n', 'none', None, _('drop all guards'))],
2165 ('n', 'none', None, _('drop all guards'))],
2166 _('hg qguard [-l] [-n] [PATCH] [+GUARD]... [-GUARD]...')),
2166 _('hg qguard [-l] [-n] [PATCH] [+GUARD]... [-GUARD]...')),
2167 'qheader': (header, [], _('hg qheader [PATCH]')),
2167 'qheader': (header, [], _('hg qheader [PATCH]')),
2168 "^qimport":
2168 "^qimport":
2169 (qimport,
2169 (qimport,
2170 [('e', 'existing', None, 'import file in patch dir'),
2170 [('e', 'existing', None, 'import file in patch dir'),
2171 ('n', 'name', '', 'patch file name'),
2171 ('n', 'name', '', 'patch file name'),
2172 ('f', 'force', None, 'overwrite existing files'),
2172 ('f', 'force', None, 'overwrite existing files'),
2173 ('r', 'rev', [], 'place existing revisions under mq control'),
2173 ('r', 'rev', [], 'place existing revisions under mq control'),
2174 ('g', 'git', None, _('use git extended diff format'))],
2174 ('g', 'git', None, _('use git extended diff format'))],
2175 _('hg qimport [-e] [-n NAME] [-f] [-g] [-r REV]... FILE...')),
2175 _('hg qimport [-e] [-n NAME] [-f] [-g] [-r REV]... FILE...')),
2176 "^qinit":
2176 "^qinit":
2177 (init,
2177 (init,
2178 [('c', 'create-repo', None, 'create queue repository')],
2178 [('c', 'create-repo', None, 'create queue repository')],
2179 _('hg qinit [-c]')),
2179 _('hg qinit [-c]')),
2180 "qnew":
2180 "qnew":
2181 (new,
2181 (new,
2182 [('e', 'edit', None, _('edit commit message')),
2182 [('e', 'edit', None, _('edit commit message')),
2183 ('f', 'force', None, _('import uncommitted changes into patch')),
2183 ('f', 'force', None, _('import uncommitted changes into patch')),
2184 ('g', 'git', None, _('use git extended diff format')),
2184 ('g', 'git', None, _('use git extended diff format')),
2185 ] + commands.walkopts + commands.commitopts,
2185 ] + commands.walkopts + commands.commitopts,
2186 _('hg qnew [-e] [-m TEXT] [-l FILE] [-f] PATCH [FILE]...')),
2186 _('hg qnew [-e] [-m TEXT] [-l FILE] [-f] PATCH [FILE]...')),
2187 "qnext": (next, [] + seriesopts, _('hg qnext [-s]')),
2187 "qnext": (next, [] + seriesopts, _('hg qnext [-s]')),
2188 "qprev": (prev, [] + seriesopts, _('hg qprev [-s]')),
2188 "qprev": (prev, [] + seriesopts, _('hg qprev [-s]')),
2189 "^qpop":
2189 "^qpop":
2190 (pop,
2190 (pop,
2191 [('a', 'all', None, _('pop all patches')),
2191 [('a', 'all', None, _('pop all patches')),
2192 ('n', 'name', '', _('queue name to pop')),
2192 ('n', 'name', '', _('queue name to pop')),
2193 ('f', 'force', None, _('forget any local changes'))],
2193 ('f', 'force', None, _('forget any local changes'))],
2194 _('hg qpop [-a] [-n NAME] [-f] [PATCH | INDEX]')),
2194 _('hg qpop [-a] [-n NAME] [-f] [PATCH | INDEX]')),
2195 "^qpush":
2195 "^qpush":
2196 (push,
2196 (push,
2197 [('f', 'force', None, _('apply if the patch has rejects')),
2197 [('f', 'force', None, _('apply if the patch has rejects')),
2198 ('l', 'list', None, _('list patch name in commit text')),
2198 ('l', 'list', None, _('list patch name in commit text')),
2199 ('a', 'all', None, _('apply all patches')),
2199 ('a', 'all', None, _('apply all patches')),
2200 ('m', 'merge', None, _('merge from another queue')),
2200 ('m', 'merge', None, _('merge from another queue')),
2201 ('n', 'name', '', _('merge queue name'))],
2201 ('n', 'name', '', _('merge queue name'))],
2202 _('hg qpush [-f] [-l] [-a] [-m] [-n NAME] [PATCH | INDEX]')),
2202 _('hg qpush [-f] [-l] [-a] [-m] [-n NAME] [PATCH | INDEX]')),
2203 "^qrefresh":
2203 "^qrefresh":
2204 (refresh,
2204 (refresh,
2205 [('e', 'edit', None, _('edit commit message')),
2205 [('e', 'edit', None, _('edit commit message')),
2206 ('g', 'git', None, _('use git extended diff format')),
2206 ('g', 'git', None, _('use git extended diff format')),
2207 ('s', 'short', None, _('refresh only files already in the patch')),
2207 ('s', 'short', None, _('refresh only files already in the patch')),
2208 ] + commands.walkopts + commands.commitopts,
2208 ] + commands.walkopts + commands.commitopts,
2209 _('hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...')),
2209 _('hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...')),
2210 'qrename|qmv':
2210 'qrename|qmv':
2211 (rename, [], _('hg qrename PATCH1 [PATCH2]')),
2211 (rename, [], _('hg qrename PATCH1 [PATCH2]')),
2212 "qrestore":
2212 "qrestore":
2213 (restore,
2213 (restore,
2214 [('d', 'delete', None, _('delete save entry')),
2214 [('d', 'delete', None, _('delete save entry')),
2215 ('u', 'update', None, _('update queue working dir'))],
2215 ('u', 'update', None, _('update queue working dir'))],
2216 _('hg qrestore [-d] [-u] REV')),
2216 _('hg qrestore [-d] [-u] REV')),
2217 "qsave":
2217 "qsave":
2218 (save,
2218 (save,
2219 [('c', 'copy', None, _('copy patch directory')),
2219 [('c', 'copy', None, _('copy patch directory')),
2220 ('n', 'name', '', _('copy directory name')),
2220 ('n', 'name', '', _('copy directory name')),
2221 ('e', 'empty', None, _('clear queue status file')),
2221 ('e', 'empty', None, _('clear queue status file')),
2222 ('f', 'force', None, _('force copy'))] + commands.commitopts,
2222 ('f', 'force', None, _('force copy'))] + commands.commitopts,
2223 _('hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]')),
2223 _('hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]')),
2224 "qselect":
2224 "qselect":
2225 (select,
2225 (select,
2226 [('n', 'none', None, _('disable all guards')),
2226 [('n', 'none', None, _('disable all guards')),
2227 ('s', 'series', None, _('list all guards in series file')),
2227 ('s', 'series', None, _('list all guards in series file')),
2228 ('', 'pop', None, _('pop to before first guarded applied patch')),
2228 ('', 'pop', None, _('pop to before first guarded applied patch')),
2229 ('', 'reapply', None, _('pop, then reapply patches'))],
2229 ('', 'reapply', None, _('pop, then reapply patches'))],
2230 _('hg qselect [OPTION]... [GUARD]...')),
2230 _('hg qselect [OPTION]... [GUARD]...')),
2231 "qseries":
2231 "qseries":
2232 (series,
2232 (series,
2233 [('m', 'missing', None, _('print patches not in series')),
2233 [('m', 'missing', None, _('print patches not in series')),
2234 ] + seriesopts,
2234 ] + seriesopts,
2235 _('hg qseries [-ms]')),
2235 _('hg qseries [-ms]')),
2236 "^strip":
2236 "^strip":
2237 (strip,
2237 (strip,
2238 [('f', 'force', None, _('force multi-head removal')),
2238 [('f', 'force', None, _('force multi-head removal')),
2239 ('b', 'backup', None, _('bundle unrelated changesets')),
2239 ('b', 'backup', None, _('bundle unrelated changesets')),
2240 ('n', 'nobackup', None, _('no backups'))],
2240 ('n', 'nobackup', None, _('no backups'))],
2241 _('hg strip [-f] [-b] [-n] REV')),
2241 _('hg strip [-f] [-b] [-n] REV')),
2242 "qtop": (top, [] + seriesopts, _('hg qtop [-s]')),
2242 "qtop": (top, [] + seriesopts, _('hg qtop [-s]')),
2243 "qunapplied": (unapplied, [] + seriesopts, _('hg qunapplied [-s] [PATCH]')),
2243 "qunapplied": (unapplied, [] + seriesopts, _('hg qunapplied [-s] [PATCH]')),
2244 }
2244 }
@@ -1,540 +1,547 b''
1 """
1 """
2 dirstate.py - working directory tracking for mercurial
2 dirstate.py - working directory tracking for mercurial
3
3
4 Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5
5
6 This software may be used and distributed according to the terms
6 This software may be used and distributed according to the terms
7 of the GNU General Public License, incorporated herein by reference.
7 of the GNU General Public License, incorporated herein by reference.
8 """
8 """
9
9
10 from node import *
10 from node import *
11 from i18n import _
11 from i18n import _
12 import struct, os, time, bisect, stat, strutil, util, re, errno, ignore
12 import struct, os, time, bisect, stat, strutil, util, re, errno, ignore
13 import cStringIO
13 import cStringIO
14
14
15 _unknown = ('?', 0, 0, 0)
15 _unknown = ('?', 0, 0, 0)
16 _format = ">cllll"
16 _format = ">cllll"
17
17
18 class dirstate(object):
18 class dirstate(object):
19
19
20 def __init__(self, opener, ui, root):
20 def __init__(self, opener, ui, root):
21 self._opener = opener
21 self._opener = opener
22 self._root = root
22 self._root = root
23 self._dirty = False
23 self._dirty = False
24 self._dirtypl = False
24 self._dirtypl = False
25 self._ui = ui
25 self._ui = ui
26
26
27 def __getattr__(self, name):
27 def __getattr__(self, name):
28 if name == '_map':
28 if name == '_map':
29 self._read()
29 self._read()
30 return self._map
30 return self._map
31 elif name == '_copymap':
31 elif name == '_copymap':
32 self._read()
32 self._read()
33 return self._copymap
33 return self._copymap
34 elif name == '_branch':
34 elif name == '_branch':
35 try:
35 try:
36 self._branch = (self._opener("branch").read().strip()
36 self._branch = (self._opener("branch").read().strip()
37 or "default")
37 or "default")
38 except IOError:
38 except IOError:
39 self._branch = "default"
39 self._branch = "default"
40 return self._branch
40 return self._branch
41 elif name == '_pl':
41 elif name == '_pl':
42 self._pl = [nullid, nullid]
42 self._pl = [nullid, nullid]
43 try:
43 try:
44 st = self._opener("dirstate").read(40)
44 st = self._opener("dirstate").read(40)
45 if len(st) == 40:
45 if len(st) == 40:
46 self._pl = st[:20], st[20:40]
46 self._pl = st[:20], st[20:40]
47 except IOError, err:
47 except IOError, err:
48 if err.errno != errno.ENOENT: raise
48 if err.errno != errno.ENOENT: raise
49 return self._pl
49 return self._pl
50 elif name == '_dirs':
50 elif name == '_dirs':
51 self._dirs = {}
51 self._dirs = {}
52 for f in self._map:
52 for f in self._map:
53 self._incpath(f)
53 self._incpath(f)
54 return self._dirs
54 return self._dirs
55 elif name == '_ignore':
55 elif name == '_ignore':
56 files = [self._join('.hgignore')]
56 files = [self._join('.hgignore')]
57 for name, path in self._ui.configitems("ui"):
57 for name, path in self._ui.configitems("ui"):
58 if name == 'ignore' or name.startswith('ignore.'):
58 if name == 'ignore' or name.startswith('ignore.'):
59 files.append(os.path.expanduser(path))
59 files.append(os.path.expanduser(path))
60 self._ignore = ignore.ignore(self._root, files, self._ui.warn)
60 self._ignore = ignore.ignore(self._root, files, self._ui.warn)
61 return self._ignore
61 return self._ignore
62 elif name == '_slash':
62 elif name == '_slash':
63 self._slash = self._ui.configbool('ui', 'slash') and os.sep != '/'
63 self._slash = self._ui.configbool('ui', 'slash') and os.sep != '/'
64 return self._slash
64 return self._slash
65 else:
65 else:
66 raise AttributeError, name
66 raise AttributeError, name
67
67
68 def _join(self, f):
68 def _join(self, f):
69 return os.path.join(self._root, f)
69 return os.path.join(self._root, f)
70
70
71 def getcwd(self):
71 def getcwd(self):
72 cwd = os.getcwd()
72 cwd = os.getcwd()
73 if cwd == self._root: return ''
73 if cwd == self._root: return ''
74 # self._root ends with a path separator if self._root is '/' or 'C:\'
74 # self._root ends with a path separator if self._root is '/' or 'C:\'
75 rootsep = self._root
75 rootsep = self._root
76 if not rootsep.endswith(os.sep):
76 if not rootsep.endswith(os.sep):
77 rootsep += os.sep
77 rootsep += os.sep
78 if cwd.startswith(rootsep):
78 if cwd.startswith(rootsep):
79 return cwd[len(rootsep):]
79 return cwd[len(rootsep):]
80 else:
80 else:
81 # we're outside the repo. return an absolute path.
81 # we're outside the repo. return an absolute path.
82 return cwd
82 return cwd
83
83
84 def pathto(self, f, cwd=None):
84 def pathto(self, f, cwd=None):
85 if cwd is None:
85 if cwd is None:
86 cwd = self.getcwd()
86 cwd = self.getcwd()
87 path = util.pathto(self._root, cwd, f)
87 path = util.pathto(self._root, cwd, f)
88 if self._slash:
88 if self._slash:
89 return path.replace(os.sep, '/')
89 return path.replace(os.sep, '/')
90 return path
90 return path
91
91
92 def __getitem__(self, key):
92 def __getitem__(self, key):
93 ''' current states:
93 ''' current states:
94 n normal
94 n normal
95 m needs merging
95 m needs merging
96 r marked for removal
96 r marked for removal
97 a marked for addition
97 a marked for addition
98 ? not tracked'''
98 ? not tracked'''
99 return self._map.get(key, ("?",))[0]
99 return self._map.get(key, ("?",))[0]
100
100
101 def __contains__(self, key):
101 def __contains__(self, key):
102 return key in self._map
102 return key in self._map
103
103
104 def __iter__(self):
104 def __iter__(self):
105 a = self._map.keys()
105 a = self._map.keys()
106 a.sort()
106 a.sort()
107 for x in a:
107 for x in a:
108 yield x
108 yield x
109
109
110 def parents(self):
110 def parents(self):
111 return self._pl
111 return self._pl
112
112
113 def branch(self):
113 def branch(self):
114 return self._branch
114 return self._branch
115
115
116 def setparents(self, p1, p2=nullid):
116 def setparents(self, p1, p2=nullid):
117 self._dirty = self._dirtypl = True
117 self._dirty = self._dirtypl = True
118 self._pl = p1, p2
118 self._pl = p1, p2
119
119
120 def setbranch(self, branch):
120 def setbranch(self, branch):
121 self._branch = branch
121 self._branch = branch
122 self._opener("branch", "w").write(branch + '\n')
122 self._opener("branch", "w").write(branch + '\n')
123
123
124 def _read(self):
124 def _read(self):
125 self._map = {}
125 self._map = {}
126 self._copymap = {}
126 self._copymap = {}
127 if not self._dirtypl:
127 if not self._dirtypl:
128 self._pl = [nullid, nullid]
128 self._pl = [nullid, nullid]
129 try:
129 try:
130 st = self._opener("dirstate").read()
130 st = self._opener("dirstate").read()
131 except IOError, err:
131 except IOError, err:
132 if err.errno != errno.ENOENT: raise
132 if err.errno != errno.ENOENT: raise
133 return
133 return
134 if not st:
134 if not st:
135 return
135 return
136
136
137 if not self._dirtypl:
137 if not self._dirtypl:
138 self._pl = [st[:20], st[20: 40]]
138 self._pl = [st[:20], st[20: 40]]
139
139
140 # deref fields so they will be local in loop
140 # deref fields so they will be local in loop
141 dmap = self._map
141 dmap = self._map
142 copymap = self._copymap
142 copymap = self._copymap
143 unpack = struct.unpack
143 unpack = struct.unpack
144
144
145 pos = 40
145 pos = 40
146 e_size = struct.calcsize(_format)
146 e_size = struct.calcsize(_format)
147
147
148 while pos < len(st):
148 while pos < len(st):
149 newpos = pos + e_size
149 newpos = pos + e_size
150 e = unpack(_format, st[pos:newpos])
150 e = unpack(_format, st[pos:newpos])
151 l = e[4]
151 l = e[4]
152 pos = newpos
152 pos = newpos
153 newpos = pos + l
153 newpos = pos + l
154 f = st[pos:newpos]
154 f = st[pos:newpos]
155 if '\0' in f:
155 if '\0' in f:
156 f, c = f.split('\0')
156 f, c = f.split('\0')
157 copymap[f] = c
157 copymap[f] = c
158 dmap[f] = e[:4]
158 dmap[f] = e[:4]
159 pos = newpos
159 pos = newpos
160
160
161 def invalidate(self):
161 def invalidate(self):
162 for a in "_map _copymap _branch _pl _dirs _ignore".split():
162 for a in "_map _copymap _branch _pl _dirs _ignore".split():
163 if a in self.__dict__:
163 if a in self.__dict__:
164 delattr(self, a)
164 delattr(self, a)
165 self._dirty = False
165 self._dirty = False
166
166
167 def copy(self, source, dest):
167 def copy(self, source, dest):
168 self._dirty = True
168 self._dirty = True
169 self._copymap[dest] = source
169 self._copymap[dest] = source
170
170
171 def copied(self, file):
171 def copied(self, file):
172 return self._copymap.get(file, None)
172 return self._copymap.get(file, None)
173
173
174 def copies(self):
174 def copies(self):
175 return self._copymap
175 return self._copymap
176
176
177 def _incpath(self, path):
177 def _incpath(self, path):
178 for c in strutil.findall(path, '/'):
178 for c in strutil.findall(path, '/'):
179 pc = path[:c]
179 pc = path[:c]
180 self._dirs.setdefault(pc, 0)
180 self._dirs.setdefault(pc, 0)
181 self._dirs[pc] += 1
181 self._dirs[pc] += 1
182
182
183 def _decpath(self, path):
183 def _decpath(self, path):
184 for c in strutil.findall(path, '/'):
184 for c in strutil.findall(path, '/'):
185 pc = path[:c]
185 pc = path[:c]
186 self._dirs.setdefault(pc, 0)
186 self._dirs.setdefault(pc, 0)
187 self._dirs[pc] -= 1
187 self._dirs[pc] -= 1
188
188
189 def _incpathcheck(self, f):
189 def _incpathcheck(self, f):
190 if '\r' in f or '\n' in f:
190 if '\r' in f or '\n' in f:
191 raise util.Abort(_("'\\n' and '\\r' disallowed in filenames"))
191 raise util.Abort(_("'\\n' and '\\r' disallowed in filenames"))
192 # shadows
192 # shadows
193 if f in self._dirs:
193 if f in self._dirs:
194 raise util.Abort(_('directory %r already in dirstate') % f)
194 raise util.Abort(_('directory %r already in dirstate') % f)
195 for c in strutil.rfindall(f, '/'):
195 for c in strutil.rfindall(f, '/'):
196 d = f[:c]
196 d = f[:c]
197 if d in self._dirs:
197 if d in self._dirs:
198 break
198 break
199 if d in self._map:
199 if d in self._map:
200 raise util.Abort(_('file %r in dirstate clashes with %r') %
200 raise util.Abort(_('file %r in dirstate clashes with %r') %
201 (d, f))
201 (d, f))
202 self._incpath(f)
202 self._incpath(f)
203
203
204 def normal(self, f):
204 def normal(self, f):
205 'mark a file normal'
205 'mark a file normal and clean'
206 self._dirty = True
206 self._dirty = True
207 s = os.lstat(self._join(f))
207 s = os.lstat(self._join(f))
208 self._map[f] = ('n', s.st_mode, s.st_size, s.st_mtime)
208 self._map[f] = ('n', s.st_mode, s.st_size, s.st_mtime)
209 if self._copymap.has_key(f):
209 if self._copymap.has_key(f):
210 del self._copymap[f]
210 del self._copymap[f]
211
211
212 def normaldirty(self, f):
212 def normallookup(self, f):
213 'mark a file normal, but possibly dirty'
213 'mark a file normal, but possibly dirty'
214 self._dirty = True
214 self._dirty = True
215 s = os.lstat(self._join(f))
215 self._map[f] = ('n', 0, -1, -1)
216 self._map[f] = ('n', s.st_mode, -1, -1)
216 if f in self._copymap:
217 del self._copymap[f]
218
219 def normaldirty(self, f):
220 'mark a file normal, but dirty'
221 self._dirty = True
222 self._map[f] = ('n', 0, -2, -1)
217 if f in self._copymap:
223 if f in self._copymap:
218 del self._copymap[f]
224 del self._copymap[f]
219
225
220 def add(self, f):
226 def add(self, f):
221 'mark a file added'
227 'mark a file added'
222 self._dirty = True
228 self._dirty = True
223 self._incpathcheck(f)
229 self._incpathcheck(f)
224 self._map[f] = ('a', 0, -1, -1)
230 self._map[f] = ('a', 0, -1, -1)
225 if f in self._copymap:
231 if f in self._copymap:
226 del self._copymap[f]
232 del self._copymap[f]
227
233
228 def remove(self, f):
234 def remove(self, f):
229 'mark a file removed'
235 'mark a file removed'
230 self._dirty = True
236 self._dirty = True
231 self._map[f] = ('r', 0, 0, 0)
237 self._map[f] = ('r', 0, 0, 0)
232 self._decpath(f)
238 self._decpath(f)
233 if f in self._copymap:
239 if f in self._copymap:
234 del self._copymap[f]
240 del self._copymap[f]
235
241
236 def merge(self, f):
242 def merge(self, f):
237 'mark a file merged'
243 'mark a file merged'
238 self._dirty = True
244 self._dirty = True
239 s = os.lstat(self._join(f))
245 s = os.lstat(self._join(f))
240 self._map[f] = ('m', s.st_mode, s.st_size, s.st_mtime)
246 self._map[f] = ('m', s.st_mode, s.st_size, s.st_mtime)
241 if f in self._copymap:
247 if f in self._copymap:
242 del self._copymap[f]
248 del self._copymap[f]
243
249
244 def forget(self, f):
250 def forget(self, f):
245 'forget a file'
251 'forget a file'
246 self._dirty = True
252 self._dirty = True
247 try:
253 try:
248 del self._map[f]
254 del self._map[f]
249 self._decpath(f)
255 self._decpath(f)
250 except KeyError:
256 except KeyError:
251 self._ui.warn(_("not in dirstate: %s!\n") % f)
257 self._ui.warn(_("not in dirstate: %s!\n") % f)
252
258
253 def clear(self):
259 def clear(self):
254 self._map = {}
260 self._map = {}
255 self._copymap = {}
261 self._copymap = {}
256 self._pl = [nullid, nullid]
262 self._pl = [nullid, nullid]
257 self._dirty = True
263 self._dirty = True
258
264
259 def rebuild(self, parent, files):
265 def rebuild(self, parent, files):
260 self.clear()
266 self.clear()
261 for f in files:
267 for f in files:
262 if files.execf(f):
268 if files.execf(f):
263 self._map[f] = ('n', 0777, -1, 0)
269 self._map[f] = ('n', 0777, -1, 0)
264 else:
270 else:
265 self._map[f] = ('n', 0666, -1, 0)
271 self._map[f] = ('n', 0666, -1, 0)
266 self._pl = (parent, nullid)
272 self._pl = (parent, nullid)
267 self._dirty = True
273 self._dirty = True
268
274
269 def write(self):
275 def write(self):
270 if not self._dirty:
276 if not self._dirty:
271 return
277 return
272 cs = cStringIO.StringIO()
278 cs = cStringIO.StringIO()
273 cs.write("".join(self._pl))
279 cs.write("".join(self._pl))
274 for f, e in self._map.iteritems():
280 for f, e in self._map.iteritems():
275 c = self.copied(f)
281 c = self.copied(f)
276 if c:
282 if c:
277 f = f + "\0" + c
283 f = f + "\0" + c
278 e = struct.pack(_format, e[0], e[1], e[2], e[3], len(f))
284 e = struct.pack(_format, e[0], e[1], e[2], e[3], len(f))
279 cs.write(e)
285 cs.write(e)
280 cs.write(f)
286 cs.write(f)
281 st = self._opener("dirstate", "w", atomictemp=True)
287 st = self._opener("dirstate", "w", atomictemp=True)
282 st.write(cs.getvalue())
288 st.write(cs.getvalue())
283 st.rename()
289 st.rename()
284 self._dirty = self._dirtypl = False
290 self._dirty = self._dirtypl = False
285
291
286 def _filter(self, files):
292 def _filter(self, files):
287 ret = {}
293 ret = {}
288 unknown = []
294 unknown = []
289
295
290 for x in files:
296 for x in files:
291 if x == '.':
297 if x == '.':
292 return self._map.copy()
298 return self._map.copy()
293 if x not in self._map:
299 if x not in self._map:
294 unknown.append(x)
300 unknown.append(x)
295 else:
301 else:
296 ret[x] = self._map[x]
302 ret[x] = self._map[x]
297
303
298 if not unknown:
304 if not unknown:
299 return ret
305 return ret
300
306
301 b = self._map.keys()
307 b = self._map.keys()
302 b.sort()
308 b.sort()
303 blen = len(b)
309 blen = len(b)
304
310
305 for x in unknown:
311 for x in unknown:
306 bs = bisect.bisect(b, "%s%s" % (x, '/'))
312 bs = bisect.bisect(b, "%s%s" % (x, '/'))
307 while bs < blen:
313 while bs < blen:
308 s = b[bs]
314 s = b[bs]
309 if len(s) > len(x) and s.startswith(x):
315 if len(s) > len(x) and s.startswith(x):
310 ret[s] = self._map[s]
316 ret[s] = self._map[s]
311 else:
317 else:
312 break
318 break
313 bs += 1
319 bs += 1
314 return ret
320 return ret
315
321
316 def _supported(self, f, mode, verbose=False):
322 def _supported(self, f, mode, verbose=False):
317 if stat.S_ISREG(mode) or stat.S_ISLNK(mode):
323 if stat.S_ISREG(mode) or stat.S_ISLNK(mode):
318 return True
324 return True
319 if verbose:
325 if verbose:
320 kind = 'unknown'
326 kind = 'unknown'
321 if stat.S_ISCHR(mode): kind = _('character device')
327 if stat.S_ISCHR(mode): kind = _('character device')
322 elif stat.S_ISBLK(mode): kind = _('block device')
328 elif stat.S_ISBLK(mode): kind = _('block device')
323 elif stat.S_ISFIFO(mode): kind = _('fifo')
329 elif stat.S_ISFIFO(mode): kind = _('fifo')
324 elif stat.S_ISSOCK(mode): kind = _('socket')
330 elif stat.S_ISSOCK(mode): kind = _('socket')
325 elif stat.S_ISDIR(mode): kind = _('directory')
331 elif stat.S_ISDIR(mode): kind = _('directory')
326 self._ui.warn(_('%s: unsupported file type (type is %s)\n')
332 self._ui.warn(_('%s: unsupported file type (type is %s)\n')
327 % (self.pathto(f), kind))
333 % (self.pathto(f), kind))
328 return False
334 return False
329
335
330 def walk(self, files=None, match=util.always, badmatch=None):
336 def walk(self, files=None, match=util.always, badmatch=None):
331 # filter out the stat
337 # filter out the stat
332 for src, f, st in self.statwalk(files, match, badmatch=badmatch):
338 for src, f, st in self.statwalk(files, match, badmatch=badmatch):
333 yield src, f
339 yield src, f
334
340
335 def statwalk(self, files=None, match=util.always, ignored=False,
341 def statwalk(self, files=None, match=util.always, ignored=False,
336 badmatch=None, directories=False):
342 badmatch=None, directories=False):
337 '''
343 '''
338 walk recursively through the directory tree, finding all files
344 walk recursively through the directory tree, finding all files
339 matched by the match function
345 matched by the match function
340
346
341 results are yielded in a tuple (src, filename, st), where src
347 results are yielded in a tuple (src, filename, st), where src
342 is one of:
348 is one of:
343 'f' the file was found in the directory tree
349 'f' the file was found in the directory tree
344 'd' the file is a directory of the tree
350 'd' the file is a directory of the tree
345 'm' the file was only in the dirstate and not in the tree
351 'm' the file was only in the dirstate and not in the tree
346 'b' file was not found and matched badmatch
352 'b' file was not found and matched badmatch
347
353
348 and st is the stat result if the file was found in the directory.
354 and st is the stat result if the file was found in the directory.
349 '''
355 '''
350
356
351 # walk all files by default
357 # walk all files by default
352 if not files:
358 if not files:
353 files = ['.']
359 files = ['.']
354 dc = self._map.copy()
360 dc = self._map.copy()
355 else:
361 else:
356 files = util.unique(files)
362 files = util.unique(files)
357 dc = self._filter(files)
363 dc = self._filter(files)
358
364
359 def imatch(file_):
365 def imatch(file_):
360 if file_ not in dc and self._ignore(file_):
366 if file_ not in dc and self._ignore(file_):
361 return False
367 return False
362 return match(file_)
368 return match(file_)
363
369
364 ignore = self._ignore
370 ignore = self._ignore
365 if ignored:
371 if ignored:
366 imatch = match
372 imatch = match
367 ignore = util.never
373 ignore = util.never
368
374
369 # self._root may end with a path separator when self._root == '/'
375 # self._root may end with a path separator when self._root == '/'
370 common_prefix_len = len(self._root)
376 common_prefix_len = len(self._root)
371 if not self._root.endswith(os.sep):
377 if not self._root.endswith(os.sep):
372 common_prefix_len += 1
378 common_prefix_len += 1
373
379
374 normpath = util.normpath
380 normpath = util.normpath
375 listdir = os.listdir
381 listdir = os.listdir
376 lstat = os.lstat
382 lstat = os.lstat
377 bisect_left = bisect.bisect_left
383 bisect_left = bisect.bisect_left
378 isdir = os.path.isdir
384 isdir = os.path.isdir
379 pconvert = util.pconvert
385 pconvert = util.pconvert
380 join = os.path.join
386 join = os.path.join
381 s_isdir = stat.S_ISDIR
387 s_isdir = stat.S_ISDIR
382 supported = self._supported
388 supported = self._supported
383 _join = self._join
389 _join = self._join
384 known = {'.hg': 1}
390 known = {'.hg': 1}
385
391
386 # recursion free walker, faster than os.walk.
392 # recursion free walker, faster than os.walk.
387 def findfiles(s):
393 def findfiles(s):
388 work = [s]
394 work = [s]
389 wadd = work.append
395 wadd = work.append
390 found = []
396 found = []
391 add = found.append
397 add = found.append
392 if directories:
398 if directories:
393 add((normpath(s[common_prefix_len:]), 'd', lstat(s)))
399 add((normpath(s[common_prefix_len:]), 'd', lstat(s)))
394 while work:
400 while work:
395 top = work.pop()
401 top = work.pop()
396 names = listdir(top)
402 names = listdir(top)
397 names.sort()
403 names.sort()
398 # nd is the top of the repository dir tree
404 # nd is the top of the repository dir tree
399 nd = normpath(top[common_prefix_len:])
405 nd = normpath(top[common_prefix_len:])
400 if nd == '.':
406 if nd == '.':
401 nd = ''
407 nd = ''
402 else:
408 else:
403 # do not recurse into a repo contained in this
409 # do not recurse into a repo contained in this
404 # one. use bisect to find .hg directory so speed
410 # one. use bisect to find .hg directory so speed
405 # is good on big directory.
411 # is good on big directory.
406 hg = bisect_left(names, '.hg')
412 hg = bisect_left(names, '.hg')
407 if hg < len(names) and names[hg] == '.hg':
413 if hg < len(names) and names[hg] == '.hg':
408 if isdir(join(top, '.hg')):
414 if isdir(join(top, '.hg')):
409 continue
415 continue
410 for f in names:
416 for f in names:
411 np = pconvert(join(nd, f))
417 np = pconvert(join(nd, f))
412 if np in known:
418 if np in known:
413 continue
419 continue
414 known[np] = 1
420 known[np] = 1
415 p = join(top, f)
421 p = join(top, f)
416 # don't trip over symlinks
422 # don't trip over symlinks
417 st = lstat(p)
423 st = lstat(p)
418 if s_isdir(st.st_mode):
424 if s_isdir(st.st_mode):
419 if not ignore(np):
425 if not ignore(np):
420 wadd(p)
426 wadd(p)
421 if directories:
427 if directories:
422 add((np, 'd', st))
428 add((np, 'd', st))
423 if np in dc and match(np):
429 if np in dc and match(np):
424 add((np, 'm', st))
430 add((np, 'm', st))
425 elif imatch(np):
431 elif imatch(np):
426 if supported(np, st.st_mode):
432 if supported(np, st.st_mode):
427 add((np, 'f', st))
433 add((np, 'f', st))
428 elif np in dc:
434 elif np in dc:
429 add((np, 'm', st))
435 add((np, 'm', st))
430 found.sort()
436 found.sort()
431 return found
437 return found
432
438
433 # step one, find all files that match our criteria
439 # step one, find all files that match our criteria
434 files.sort()
440 files.sort()
435 for ff in files:
441 for ff in files:
436 nf = normpath(ff)
442 nf = normpath(ff)
437 f = _join(ff)
443 f = _join(ff)
438 try:
444 try:
439 st = lstat(f)
445 st = lstat(f)
440 except OSError, inst:
446 except OSError, inst:
441 found = False
447 found = False
442 for fn in dc:
448 for fn in dc:
443 if nf == fn or (fn.startswith(nf) and fn[len(nf)] == '/'):
449 if nf == fn or (fn.startswith(nf) and fn[len(nf)] == '/'):
444 found = True
450 found = True
445 break
451 break
446 if not found:
452 if not found:
447 if inst.errno != errno.ENOENT or not badmatch:
453 if inst.errno != errno.ENOENT or not badmatch:
448 self._ui.warn('%s: %s\n' %
454 self._ui.warn('%s: %s\n' %
449 (self.pathto(ff), inst.strerror))
455 (self.pathto(ff), inst.strerror))
450 elif badmatch and badmatch(ff) and imatch(nf):
456 elif badmatch and badmatch(ff) and imatch(nf):
451 yield 'b', ff, None
457 yield 'b', ff, None
452 continue
458 continue
453 if s_isdir(st.st_mode):
459 if s_isdir(st.st_mode):
454 for f, src, st in findfiles(f):
460 for f, src, st in findfiles(f):
455 yield src, f, st
461 yield src, f, st
456 else:
462 else:
457 if nf in known:
463 if nf in known:
458 continue
464 continue
459 known[nf] = 1
465 known[nf] = 1
460 if match(nf):
466 if match(nf):
461 if supported(ff, st.st_mode, verbose=True):
467 if supported(ff, st.st_mode, verbose=True):
462 yield 'f', nf, st
468 yield 'f', nf, st
463 elif ff in dc:
469 elif ff in dc:
464 yield 'm', nf, st
470 yield 'm', nf, st
465
471
466 # step two run through anything left in the dc hash and yield
472 # step two run through anything left in the dc hash and yield
467 # if we haven't already seen it
473 # if we haven't already seen it
468 ks = dc.keys()
474 ks = dc.keys()
469 ks.sort()
475 ks.sort()
470 for k in ks:
476 for k in ks:
471 if k in known:
477 if k in known:
472 continue
478 continue
473 known[k] = 1
479 known[k] = 1
474 if imatch(k):
480 if imatch(k):
475 yield 'm', k, None
481 yield 'm', k, None
476
482
477 def status(self, files, match, list_ignored, list_clean):
483 def status(self, files, match, list_ignored, list_clean):
478 lookup, modified, added, unknown, ignored = [], [], [], [], []
484 lookup, modified, added, unknown, ignored = [], [], [], [], []
479 removed, deleted, clean = [], [], []
485 removed, deleted, clean = [], [], []
480
486
481 _join = self._join
487 _join = self._join
482 lstat = os.lstat
488 lstat = os.lstat
483 cmap = self._copymap
489 cmap = self._copymap
484 dmap = self._map
490 dmap = self._map
485 ladd = lookup.append
491 ladd = lookup.append
486 madd = modified.append
492 madd = modified.append
487 aadd = added.append
493 aadd = added.append
488 uadd = unknown.append
494 uadd = unknown.append
489 iadd = ignored.append
495 iadd = ignored.append
490 radd = removed.append
496 radd = removed.append
491 dadd = deleted.append
497 dadd = deleted.append
492 cadd = clean.append
498 cadd = clean.append
493
499
494 for src, fn, st in self.statwalk(files, match, ignored=list_ignored):
500 for src, fn, st in self.statwalk(files, match, ignored=list_ignored):
495 if fn in dmap:
501 if fn in dmap:
496 type_, mode, size, time = dmap[fn]
502 type_, mode, size, time = dmap[fn]
497 else:
503 else:
498 if list_ignored and self._ignore(fn):
504 if list_ignored and self._ignore(fn):
499 iadd(fn)
505 iadd(fn)
500 else:
506 else:
501 uadd(fn)
507 uadd(fn)
502 continue
508 continue
503 if src == 'm':
509 if src == 'm':
504 nonexistent = True
510 nonexistent = True
505 if not st:
511 if not st:
506 try:
512 try:
507 st = lstat(_join(fn))
513 st = lstat(_join(fn))
508 except OSError, inst:
514 except OSError, inst:
509 if inst.errno != errno.ENOENT:
515 if inst.errno != errno.ENOENT:
510 raise
516 raise
511 st = None
517 st = None
512 # We need to re-check that it is a valid file
518 # We need to re-check that it is a valid file
513 if st and self._supported(fn, st.st_mode):
519 if st and self._supported(fn, st.st_mode):
514 nonexistent = False
520 nonexistent = False
515 # XXX: what to do with file no longer present in the fs
521 # XXX: what to do with file no longer present in the fs
516 # who are not removed in the dirstate ?
522 # who are not removed in the dirstate ?
517 if nonexistent and type_ in "nm":
523 if nonexistent and type_ in "nm":
518 dadd(fn)
524 dadd(fn)
519 continue
525 continue
520 # check the common case first
526 # check the common case first
521 if type_ == 'n':
527 if type_ == 'n':
522 if not st:
528 if not st:
523 st = lstat(_join(fn))
529 st = lstat(_join(fn))
524 if (size >= 0 and (size != st.st_size
530 if (size >= 0 and (size != st.st_size
525 or (mode ^ st.st_mode) & 0100)
531 or (mode ^ st.st_mode) & 0100)
532 or size == -2
526 or fn in self._copymap):
533 or fn in self._copymap):
527 madd(fn)
534 madd(fn)
528 elif time != int(st.st_mtime):
535 elif time != int(st.st_mtime):
529 ladd(fn)
536 ladd(fn)
530 elif list_clean:
537 elif list_clean:
531 cadd(fn)
538 cadd(fn)
532 elif type_ == 'm':
539 elif type_ == 'm':
533 madd(fn)
540 madd(fn)
534 elif type_ == 'a':
541 elif type_ == 'a':
535 aadd(fn)
542 aadd(fn)
536 elif type_ == 'r':
543 elif type_ == 'r':
537 radd(fn)
544 radd(fn)
538
545
539 return (lookup, modified, added, removed, deleted, unknown, ignored,
546 return (lookup, modified, added, removed, deleted, unknown, ignored,
540 clean)
547 clean)
@@ -1,1986 +1,1986 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import _
9 from i18n import _
10 import repo, changegroup
10 import repo, changegroup
11 import changelog, dirstate, filelog, manifest, context, weakref
11 import changelog, dirstate, filelog, manifest, context, weakref
12 import re, lock, transaction, tempfile, stat, errno, ui
12 import re, lock, transaction, tempfile, stat, errno, ui
13 import os, revlog, time, util, extensions, hook
13 import os, revlog, time, util, extensions, hook
14
14
15 class localrepository(repo.repository):
15 class localrepository(repo.repository):
16 capabilities = ('lookup', 'changegroupsubset')
16 capabilities = ('lookup', 'changegroupsubset')
17 supported = ('revlogv1', 'store')
17 supported = ('revlogv1', 'store')
18
18
19 def __init__(self, parentui, path=None, create=0):
19 def __init__(self, parentui, path=None, create=0):
20 repo.repository.__init__(self)
20 repo.repository.__init__(self)
21 self.path = path
21 self.path = path
22 self.root = os.path.realpath(path)
22 self.root = os.path.realpath(path)
23 self.path = os.path.join(self.root, ".hg")
23 self.path = os.path.join(self.root, ".hg")
24 self.origroot = path
24 self.origroot = path
25 self.opener = util.opener(self.path)
25 self.opener = util.opener(self.path)
26 self.wopener = util.opener(self.root)
26 self.wopener = util.opener(self.root)
27
27
28 if not os.path.isdir(self.path):
28 if not os.path.isdir(self.path):
29 if create:
29 if create:
30 if not os.path.exists(path):
30 if not os.path.exists(path):
31 os.mkdir(path)
31 os.mkdir(path)
32 os.mkdir(self.path)
32 os.mkdir(self.path)
33 requirements = ["revlogv1"]
33 requirements = ["revlogv1"]
34 if parentui.configbool('format', 'usestore', True):
34 if parentui.configbool('format', 'usestore', True):
35 os.mkdir(os.path.join(self.path, "store"))
35 os.mkdir(os.path.join(self.path, "store"))
36 requirements.append("store")
36 requirements.append("store")
37 # create an invalid changelog
37 # create an invalid changelog
38 self.opener("00changelog.i", "a").write(
38 self.opener("00changelog.i", "a").write(
39 '\0\0\0\2' # represents revlogv2
39 '\0\0\0\2' # represents revlogv2
40 ' dummy changelog to prevent using the old repo layout'
40 ' dummy changelog to prevent using the old repo layout'
41 )
41 )
42 reqfile = self.opener("requires", "w")
42 reqfile = self.opener("requires", "w")
43 for r in requirements:
43 for r in requirements:
44 reqfile.write("%s\n" % r)
44 reqfile.write("%s\n" % r)
45 reqfile.close()
45 reqfile.close()
46 else:
46 else:
47 raise repo.RepoError(_("repository %s not found") % path)
47 raise repo.RepoError(_("repository %s not found") % path)
48 elif create:
48 elif create:
49 raise repo.RepoError(_("repository %s already exists") % path)
49 raise repo.RepoError(_("repository %s already exists") % path)
50 else:
50 else:
51 # find requirements
51 # find requirements
52 try:
52 try:
53 requirements = self.opener("requires").read().splitlines()
53 requirements = self.opener("requires").read().splitlines()
54 except IOError, inst:
54 except IOError, inst:
55 if inst.errno != errno.ENOENT:
55 if inst.errno != errno.ENOENT:
56 raise
56 raise
57 requirements = []
57 requirements = []
58 # check them
58 # check them
59 for r in requirements:
59 for r in requirements:
60 if r not in self.supported:
60 if r not in self.supported:
61 raise repo.RepoError(_("requirement '%s' not supported") % r)
61 raise repo.RepoError(_("requirement '%s' not supported") % r)
62
62
63 # setup store
63 # setup store
64 if "store" in requirements:
64 if "store" in requirements:
65 self.encodefn = util.encodefilename
65 self.encodefn = util.encodefilename
66 self.decodefn = util.decodefilename
66 self.decodefn = util.decodefilename
67 self.spath = os.path.join(self.path, "store")
67 self.spath = os.path.join(self.path, "store")
68 else:
68 else:
69 self.encodefn = lambda x: x
69 self.encodefn = lambda x: x
70 self.decodefn = lambda x: x
70 self.decodefn = lambda x: x
71 self.spath = self.path
71 self.spath = self.path
72 self.sopener = util.encodedopener(util.opener(self.spath),
72 self.sopener = util.encodedopener(util.opener(self.spath),
73 self.encodefn)
73 self.encodefn)
74
74
75 self.ui = ui.ui(parentui=parentui)
75 self.ui = ui.ui(parentui=parentui)
76 try:
76 try:
77 self.ui.readconfig(self.join("hgrc"), self.root)
77 self.ui.readconfig(self.join("hgrc"), self.root)
78 extensions.loadall(self.ui)
78 extensions.loadall(self.ui)
79 except IOError:
79 except IOError:
80 pass
80 pass
81
81
82 self.tagscache = None
82 self.tagscache = None
83 self.branchcache = None
83 self.branchcache = None
84 self.nodetagscache = None
84 self.nodetagscache = None
85 self.filterpats = {}
85 self.filterpats = {}
86 self._transref = self._lockref = self._wlockref = None
86 self._transref = self._lockref = self._wlockref = None
87
87
88 def __getattr__(self, name):
88 def __getattr__(self, name):
89 if name == 'changelog':
89 if name == 'changelog':
90 self.changelog = changelog.changelog(self.sopener)
90 self.changelog = changelog.changelog(self.sopener)
91 self.sopener.defversion = self.changelog.version
91 self.sopener.defversion = self.changelog.version
92 return self.changelog
92 return self.changelog
93 if name == 'manifest':
93 if name == 'manifest':
94 self.changelog
94 self.changelog
95 self.manifest = manifest.manifest(self.sopener)
95 self.manifest = manifest.manifest(self.sopener)
96 return self.manifest
96 return self.manifest
97 if name == 'dirstate':
97 if name == 'dirstate':
98 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
98 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
99 return self.dirstate
99 return self.dirstate
100 else:
100 else:
101 raise AttributeError, name
101 raise AttributeError, name
102
102
103 def url(self):
103 def url(self):
104 return 'file:' + self.root
104 return 'file:' + self.root
105
105
106 def hook(self, name, throw=False, **args):
106 def hook(self, name, throw=False, **args):
107 return hook.hook(self.ui, self, name, throw, **args)
107 return hook.hook(self.ui, self, name, throw, **args)
108
108
109 tag_disallowed = ':\r\n'
109 tag_disallowed = ':\r\n'
110
110
111 def _tag(self, name, node, message, local, user, date, parent=None,
111 def _tag(self, name, node, message, local, user, date, parent=None,
112 extra={}):
112 extra={}):
113 use_dirstate = parent is None
113 use_dirstate = parent is None
114
114
115 for c in self.tag_disallowed:
115 for c in self.tag_disallowed:
116 if c in name:
116 if c in name:
117 raise util.Abort(_('%r cannot be used in a tag name') % c)
117 raise util.Abort(_('%r cannot be used in a tag name') % c)
118
118
119 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
119 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
120
120
121 def writetag(fp, name, munge, prevtags):
121 def writetag(fp, name, munge, prevtags):
122 if prevtags and prevtags[-1] != '\n':
122 if prevtags and prevtags[-1] != '\n':
123 fp.write('\n')
123 fp.write('\n')
124 fp.write('%s %s\n' % (hex(node), munge and munge(name) or name))
124 fp.write('%s %s\n' % (hex(node), munge and munge(name) or name))
125 fp.close()
125 fp.close()
126 self.hook('tag', node=hex(node), tag=name, local=local)
126 self.hook('tag', node=hex(node), tag=name, local=local)
127
127
128 prevtags = ''
128 prevtags = ''
129 if local:
129 if local:
130 try:
130 try:
131 fp = self.opener('localtags', 'r+')
131 fp = self.opener('localtags', 'r+')
132 except IOError, err:
132 except IOError, err:
133 fp = self.opener('localtags', 'a')
133 fp = self.opener('localtags', 'a')
134 else:
134 else:
135 prevtags = fp.read()
135 prevtags = fp.read()
136
136
137 # local tags are stored in the current charset
137 # local tags are stored in the current charset
138 writetag(fp, name, None, prevtags)
138 writetag(fp, name, None, prevtags)
139 return
139 return
140
140
141 if use_dirstate:
141 if use_dirstate:
142 try:
142 try:
143 fp = self.wfile('.hgtags', 'rb+')
143 fp = self.wfile('.hgtags', 'rb+')
144 except IOError, err:
144 except IOError, err:
145 fp = self.wfile('.hgtags', 'ab')
145 fp = self.wfile('.hgtags', 'ab')
146 else:
146 else:
147 prevtags = fp.read()
147 prevtags = fp.read()
148 else:
148 else:
149 try:
149 try:
150 prevtags = self.filectx('.hgtags', parent).data()
150 prevtags = self.filectx('.hgtags', parent).data()
151 except revlog.LookupError:
151 except revlog.LookupError:
152 pass
152 pass
153 fp = self.wfile('.hgtags', 'wb')
153 fp = self.wfile('.hgtags', 'wb')
154
154
155 # committed tags are stored in UTF-8
155 # committed tags are stored in UTF-8
156 writetag(fp, name, util.fromlocal, prevtags)
156 writetag(fp, name, util.fromlocal, prevtags)
157
157
158 if use_dirstate and '.hgtags' not in self.dirstate:
158 if use_dirstate and '.hgtags' not in self.dirstate:
159 self.add(['.hgtags'])
159 self.add(['.hgtags'])
160
160
161 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
161 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
162 extra=extra)
162 extra=extra)
163
163
164 self.hook('tag', node=hex(node), tag=name, local=local)
164 self.hook('tag', node=hex(node), tag=name, local=local)
165
165
166 return tagnode
166 return tagnode
167
167
168 def tag(self, name, node, message, local, user, date):
168 def tag(self, name, node, message, local, user, date):
169 '''tag a revision with a symbolic name.
169 '''tag a revision with a symbolic name.
170
170
171 if local is True, the tag is stored in a per-repository file.
171 if local is True, the tag is stored in a per-repository file.
172 otherwise, it is stored in the .hgtags file, and a new
172 otherwise, it is stored in the .hgtags file, and a new
173 changeset is committed with the change.
173 changeset is committed with the change.
174
174
175 keyword arguments:
175 keyword arguments:
176
176
177 local: whether to store tag in non-version-controlled file
177 local: whether to store tag in non-version-controlled file
178 (default False)
178 (default False)
179
179
180 message: commit message to use if committing
180 message: commit message to use if committing
181
181
182 user: name of user to use if committing
182 user: name of user to use if committing
183
183
184 date: date tuple to use if committing'''
184 date: date tuple to use if committing'''
185
185
186 for x in self.status()[:5]:
186 for x in self.status()[:5]:
187 if '.hgtags' in x:
187 if '.hgtags' in x:
188 raise util.Abort(_('working copy of .hgtags is changed '
188 raise util.Abort(_('working copy of .hgtags is changed '
189 '(please commit .hgtags manually)'))
189 '(please commit .hgtags manually)'))
190
190
191
191
192 self._tag(name, node, message, local, user, date)
192 self._tag(name, node, message, local, user, date)
193
193
194 def tags(self):
194 def tags(self):
195 '''return a mapping of tag to node'''
195 '''return a mapping of tag to node'''
196 if self.tagscache:
196 if self.tagscache:
197 return self.tagscache
197 return self.tagscache
198
198
199 globaltags = {}
199 globaltags = {}
200
200
201 def readtags(lines, fn):
201 def readtags(lines, fn):
202 filetags = {}
202 filetags = {}
203 count = 0
203 count = 0
204
204
205 def warn(msg):
205 def warn(msg):
206 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
206 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
207
207
208 for l in lines:
208 for l in lines:
209 count += 1
209 count += 1
210 if not l:
210 if not l:
211 continue
211 continue
212 s = l.split(" ", 1)
212 s = l.split(" ", 1)
213 if len(s) != 2:
213 if len(s) != 2:
214 warn(_("cannot parse entry"))
214 warn(_("cannot parse entry"))
215 continue
215 continue
216 node, key = s
216 node, key = s
217 key = util.tolocal(key.strip()) # stored in UTF-8
217 key = util.tolocal(key.strip()) # stored in UTF-8
218 try:
218 try:
219 bin_n = bin(node)
219 bin_n = bin(node)
220 except TypeError:
220 except TypeError:
221 warn(_("node '%s' is not well formed") % node)
221 warn(_("node '%s' is not well formed") % node)
222 continue
222 continue
223 if bin_n not in self.changelog.nodemap:
223 if bin_n not in self.changelog.nodemap:
224 warn(_("tag '%s' refers to unknown node") % key)
224 warn(_("tag '%s' refers to unknown node") % key)
225 continue
225 continue
226
226
227 h = []
227 h = []
228 if key in filetags:
228 if key in filetags:
229 n, h = filetags[key]
229 n, h = filetags[key]
230 h.append(n)
230 h.append(n)
231 filetags[key] = (bin_n, h)
231 filetags[key] = (bin_n, h)
232
232
233 for k, nh in filetags.items():
233 for k, nh in filetags.items():
234 if k not in globaltags:
234 if k not in globaltags:
235 globaltags[k] = nh
235 globaltags[k] = nh
236 continue
236 continue
237 # we prefer the global tag if:
237 # we prefer the global tag if:
238 # it supercedes us OR
238 # it supercedes us OR
239 # mutual supercedes and it has a higher rank
239 # mutual supercedes and it has a higher rank
240 # otherwise we win because we're tip-most
240 # otherwise we win because we're tip-most
241 an, ah = nh
241 an, ah = nh
242 bn, bh = globaltags[k]
242 bn, bh = globaltags[k]
243 if (bn != an and an in bh and
243 if (bn != an and an in bh and
244 (bn not in ah or len(bh) > len(ah))):
244 (bn not in ah or len(bh) > len(ah))):
245 an = bn
245 an = bn
246 ah.extend([n for n in bh if n not in ah])
246 ah.extend([n for n in bh if n not in ah])
247 globaltags[k] = an, ah
247 globaltags[k] = an, ah
248
248
249 # read the tags file from each head, ending with the tip
249 # read the tags file from each head, ending with the tip
250 f = None
250 f = None
251 for rev, node, fnode in self._hgtagsnodes():
251 for rev, node, fnode in self._hgtagsnodes():
252 f = (f and f.filectx(fnode) or
252 f = (f and f.filectx(fnode) or
253 self.filectx('.hgtags', fileid=fnode))
253 self.filectx('.hgtags', fileid=fnode))
254 readtags(f.data().splitlines(), f)
254 readtags(f.data().splitlines(), f)
255
255
256 try:
256 try:
257 data = util.fromlocal(self.opener("localtags").read())
257 data = util.fromlocal(self.opener("localtags").read())
258 # localtags are stored in the local character set
258 # localtags are stored in the local character set
259 # while the internal tag table is stored in UTF-8
259 # while the internal tag table is stored in UTF-8
260 readtags(data.splitlines(), "localtags")
260 readtags(data.splitlines(), "localtags")
261 except IOError:
261 except IOError:
262 pass
262 pass
263
263
264 self.tagscache = {}
264 self.tagscache = {}
265 for k,nh in globaltags.items():
265 for k,nh in globaltags.items():
266 n = nh[0]
266 n = nh[0]
267 if n != nullid:
267 if n != nullid:
268 self.tagscache[k] = n
268 self.tagscache[k] = n
269 self.tagscache['tip'] = self.changelog.tip()
269 self.tagscache['tip'] = self.changelog.tip()
270
270
271 return self.tagscache
271 return self.tagscache
272
272
273 def _hgtagsnodes(self):
273 def _hgtagsnodes(self):
274 heads = self.heads()
274 heads = self.heads()
275 heads.reverse()
275 heads.reverse()
276 last = {}
276 last = {}
277 ret = []
277 ret = []
278 for node in heads:
278 for node in heads:
279 c = self.changectx(node)
279 c = self.changectx(node)
280 rev = c.rev()
280 rev = c.rev()
281 try:
281 try:
282 fnode = c.filenode('.hgtags')
282 fnode = c.filenode('.hgtags')
283 except revlog.LookupError:
283 except revlog.LookupError:
284 continue
284 continue
285 ret.append((rev, node, fnode))
285 ret.append((rev, node, fnode))
286 if fnode in last:
286 if fnode in last:
287 ret[last[fnode]] = None
287 ret[last[fnode]] = None
288 last[fnode] = len(ret) - 1
288 last[fnode] = len(ret) - 1
289 return [item for item in ret if item]
289 return [item for item in ret if item]
290
290
291 def tagslist(self):
291 def tagslist(self):
292 '''return a list of tags ordered by revision'''
292 '''return a list of tags ordered by revision'''
293 l = []
293 l = []
294 for t, n in self.tags().items():
294 for t, n in self.tags().items():
295 try:
295 try:
296 r = self.changelog.rev(n)
296 r = self.changelog.rev(n)
297 except:
297 except:
298 r = -2 # sort to the beginning of the list if unknown
298 r = -2 # sort to the beginning of the list if unknown
299 l.append((r, t, n))
299 l.append((r, t, n))
300 l.sort()
300 l.sort()
301 return [(t, n) for r, t, n in l]
301 return [(t, n) for r, t, n in l]
302
302
303 def nodetags(self, node):
303 def nodetags(self, node):
304 '''return the tags associated with a node'''
304 '''return the tags associated with a node'''
305 if not self.nodetagscache:
305 if not self.nodetagscache:
306 self.nodetagscache = {}
306 self.nodetagscache = {}
307 for t, n in self.tags().items():
307 for t, n in self.tags().items():
308 self.nodetagscache.setdefault(n, []).append(t)
308 self.nodetagscache.setdefault(n, []).append(t)
309 return self.nodetagscache.get(node, [])
309 return self.nodetagscache.get(node, [])
310
310
311 def _branchtags(self):
311 def _branchtags(self):
312 partial, last, lrev = self._readbranchcache()
312 partial, last, lrev = self._readbranchcache()
313
313
314 tiprev = self.changelog.count() - 1
314 tiprev = self.changelog.count() - 1
315 if lrev != tiprev:
315 if lrev != tiprev:
316 self._updatebranchcache(partial, lrev+1, tiprev+1)
316 self._updatebranchcache(partial, lrev+1, tiprev+1)
317 self._writebranchcache(partial, self.changelog.tip(), tiprev)
317 self._writebranchcache(partial, self.changelog.tip(), tiprev)
318
318
319 return partial
319 return partial
320
320
321 def branchtags(self):
321 def branchtags(self):
322 if self.branchcache is not None:
322 if self.branchcache is not None:
323 return self.branchcache
323 return self.branchcache
324
324
325 self.branchcache = {} # avoid recursion in changectx
325 self.branchcache = {} # avoid recursion in changectx
326 partial = self._branchtags()
326 partial = self._branchtags()
327
327
328 # the branch cache is stored on disk as UTF-8, but in the local
328 # the branch cache is stored on disk as UTF-8, but in the local
329 # charset internally
329 # charset internally
330 for k, v in partial.items():
330 for k, v in partial.items():
331 self.branchcache[util.tolocal(k)] = v
331 self.branchcache[util.tolocal(k)] = v
332 return self.branchcache
332 return self.branchcache
333
333
334 def _readbranchcache(self):
334 def _readbranchcache(self):
335 partial = {}
335 partial = {}
336 try:
336 try:
337 f = self.opener("branch.cache")
337 f = self.opener("branch.cache")
338 lines = f.read().split('\n')
338 lines = f.read().split('\n')
339 f.close()
339 f.close()
340 except (IOError, OSError):
340 except (IOError, OSError):
341 return {}, nullid, nullrev
341 return {}, nullid, nullrev
342
342
343 try:
343 try:
344 last, lrev = lines.pop(0).split(" ", 1)
344 last, lrev = lines.pop(0).split(" ", 1)
345 last, lrev = bin(last), int(lrev)
345 last, lrev = bin(last), int(lrev)
346 if not (lrev < self.changelog.count() and
346 if not (lrev < self.changelog.count() and
347 self.changelog.node(lrev) == last): # sanity check
347 self.changelog.node(lrev) == last): # sanity check
348 # invalidate the cache
348 # invalidate the cache
349 raise ValueError('Invalid branch cache: unknown tip')
349 raise ValueError('Invalid branch cache: unknown tip')
350 for l in lines:
350 for l in lines:
351 if not l: continue
351 if not l: continue
352 node, label = l.split(" ", 1)
352 node, label = l.split(" ", 1)
353 partial[label.strip()] = bin(node)
353 partial[label.strip()] = bin(node)
354 except (KeyboardInterrupt, util.SignalInterrupt):
354 except (KeyboardInterrupt, util.SignalInterrupt):
355 raise
355 raise
356 except Exception, inst:
356 except Exception, inst:
357 if self.ui.debugflag:
357 if self.ui.debugflag:
358 self.ui.warn(str(inst), '\n')
358 self.ui.warn(str(inst), '\n')
359 partial, last, lrev = {}, nullid, nullrev
359 partial, last, lrev = {}, nullid, nullrev
360 return partial, last, lrev
360 return partial, last, lrev
361
361
362 def _writebranchcache(self, branches, tip, tiprev):
362 def _writebranchcache(self, branches, tip, tiprev):
363 try:
363 try:
364 f = self.opener("branch.cache", "w", atomictemp=True)
364 f = self.opener("branch.cache", "w", atomictemp=True)
365 f.write("%s %s\n" % (hex(tip), tiprev))
365 f.write("%s %s\n" % (hex(tip), tiprev))
366 for label, node in branches.iteritems():
366 for label, node in branches.iteritems():
367 f.write("%s %s\n" % (hex(node), label))
367 f.write("%s %s\n" % (hex(node), label))
368 f.rename()
368 f.rename()
369 except (IOError, OSError):
369 except (IOError, OSError):
370 pass
370 pass
371
371
372 def _updatebranchcache(self, partial, start, end):
372 def _updatebranchcache(self, partial, start, end):
373 for r in xrange(start, end):
373 for r in xrange(start, end):
374 c = self.changectx(r)
374 c = self.changectx(r)
375 b = c.branch()
375 b = c.branch()
376 partial[b] = c.node()
376 partial[b] = c.node()
377
377
378 def lookup(self, key):
378 def lookup(self, key):
379 if key == '.':
379 if key == '.':
380 key, second = self.dirstate.parents()
380 key, second = self.dirstate.parents()
381 if key == nullid:
381 if key == nullid:
382 raise repo.RepoError(_("no revision checked out"))
382 raise repo.RepoError(_("no revision checked out"))
383 if second != nullid:
383 if second != nullid:
384 self.ui.warn(_("warning: working directory has two parents, "
384 self.ui.warn(_("warning: working directory has two parents, "
385 "tag '.' uses the first\n"))
385 "tag '.' uses the first\n"))
386 elif key == 'null':
386 elif key == 'null':
387 return nullid
387 return nullid
388 n = self.changelog._match(key)
388 n = self.changelog._match(key)
389 if n:
389 if n:
390 return n
390 return n
391 if key in self.tags():
391 if key in self.tags():
392 return self.tags()[key]
392 return self.tags()[key]
393 if key in self.branchtags():
393 if key in self.branchtags():
394 return self.branchtags()[key]
394 return self.branchtags()[key]
395 n = self.changelog._partialmatch(key)
395 n = self.changelog._partialmatch(key)
396 if n:
396 if n:
397 return n
397 return n
398 try:
398 try:
399 if len(key) == 20:
399 if len(key) == 20:
400 key = hex(key)
400 key = hex(key)
401 except:
401 except:
402 pass
402 pass
403 raise repo.RepoError(_("unknown revision '%s'") % key)
403 raise repo.RepoError(_("unknown revision '%s'") % key)
404
404
405 def dev(self):
405 def dev(self):
406 return os.lstat(self.path).st_dev
406 return os.lstat(self.path).st_dev
407
407
408 def local(self):
408 def local(self):
409 return True
409 return True
410
410
411 def join(self, f):
411 def join(self, f):
412 return os.path.join(self.path, f)
412 return os.path.join(self.path, f)
413
413
414 def sjoin(self, f):
414 def sjoin(self, f):
415 f = self.encodefn(f)
415 f = self.encodefn(f)
416 return os.path.join(self.spath, f)
416 return os.path.join(self.spath, f)
417
417
418 def wjoin(self, f):
418 def wjoin(self, f):
419 return os.path.join(self.root, f)
419 return os.path.join(self.root, f)
420
420
421 def file(self, f):
421 def file(self, f):
422 if f[0] == '/':
422 if f[0] == '/':
423 f = f[1:]
423 f = f[1:]
424 return filelog.filelog(self.sopener, f)
424 return filelog.filelog(self.sopener, f)
425
425
426 def changectx(self, changeid=None):
426 def changectx(self, changeid=None):
427 return context.changectx(self, changeid)
427 return context.changectx(self, changeid)
428
428
429 def workingctx(self):
429 def workingctx(self):
430 return context.workingctx(self)
430 return context.workingctx(self)
431
431
432 def parents(self, changeid=None):
432 def parents(self, changeid=None):
433 '''
433 '''
434 get list of changectxs for parents of changeid or working directory
434 get list of changectxs for parents of changeid or working directory
435 '''
435 '''
436 if changeid is None:
436 if changeid is None:
437 pl = self.dirstate.parents()
437 pl = self.dirstate.parents()
438 else:
438 else:
439 n = self.changelog.lookup(changeid)
439 n = self.changelog.lookup(changeid)
440 pl = self.changelog.parents(n)
440 pl = self.changelog.parents(n)
441 if pl[1] == nullid:
441 if pl[1] == nullid:
442 return [self.changectx(pl[0])]
442 return [self.changectx(pl[0])]
443 return [self.changectx(pl[0]), self.changectx(pl[1])]
443 return [self.changectx(pl[0]), self.changectx(pl[1])]
444
444
445 def filectx(self, path, changeid=None, fileid=None):
445 def filectx(self, path, changeid=None, fileid=None):
446 """changeid can be a changeset revision, node, or tag.
446 """changeid can be a changeset revision, node, or tag.
447 fileid can be a file revision or node."""
447 fileid can be a file revision or node."""
448 return context.filectx(self, path, changeid, fileid)
448 return context.filectx(self, path, changeid, fileid)
449
449
450 def getcwd(self):
450 def getcwd(self):
451 return self.dirstate.getcwd()
451 return self.dirstate.getcwd()
452
452
453 def pathto(self, f, cwd=None):
453 def pathto(self, f, cwd=None):
454 return self.dirstate.pathto(f, cwd)
454 return self.dirstate.pathto(f, cwd)
455
455
456 def wfile(self, f, mode='r'):
456 def wfile(self, f, mode='r'):
457 return self.wopener(f, mode)
457 return self.wopener(f, mode)
458
458
459 def _link(self, f):
459 def _link(self, f):
460 return os.path.islink(self.wjoin(f))
460 return os.path.islink(self.wjoin(f))
461
461
    def _filter(self, filter, filename, data):
        """Pipe *data* through the filter commands configured for *filename*.

        *filter* is the config section name ("encode" or "decode").  The
        (matcher, command) pairs for a section are compiled from the
        config on first use and cached in self.filterpats; the first
        pattern that matches *filename* wins.
        """
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                l.append((mf, cmd))
            self.filterpats[filter] = l

        for mf, cmd in self.filterpats[filter]:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = util.filter(data, cmd)
                # only the first matching filter is applied
                break

        return data
477
477
478 def wread(self, filename):
478 def wread(self, filename):
479 if self._link(filename):
479 if self._link(filename):
480 data = os.readlink(self.wjoin(filename))
480 data = os.readlink(self.wjoin(filename))
481 else:
481 else:
482 data = self.wopener(filename, 'r').read()
482 data = self.wopener(filename, 'r').read()
483 return self._filter("encode", filename, data)
483 return self._filter("encode", filename, data)
484
484
    def wwrite(self, filename, data, flags):
        """Write *data* to *filename* in the working directory.

        The configured "decode" filters are applied first.  If *flags*
        contains "l" the file is created as a symlink; otherwise it is
        written as a regular file and its executable bit is set to match
        the presence of "x" in *flags*.
        """
        data = self._filter("decode", filename, data)
        if "l" in flags:
            self.wopener.symlink(data, filename)
        else:
            try:
                # an existing symlink must be removed before we can write
                # a regular file in its place
                if self._link(filename):
                    os.unlink(self.wjoin(filename))
            except OSError:
                pass
            self.wopener(filename, 'w').write(data)
            util.set_exec(self.wjoin(filename), "x" in flags)
497
497
498 def wwritedata(self, filename, data):
498 def wwritedata(self, filename, data):
499 return self._filter("decode", filename, data)
499 return self._filter("decode", filename, data)
500
500
    def transaction(self):
        """Open a store transaction, or nest into one already running.

        Before starting a fresh transaction, the current dirstate is
        copied to journal.dirstate so rollback() can restore it; when the
        transaction closes, the journal files are renamed to their undo
        counterparts.  Only a weak reference to the transaction is kept,
        so it is finalized when its last user drops it.
        """
        if self._transref and self._transref():
            return self._transref().nest()

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames))
        self._transref = weakref.ref(tr)
        return tr
519
519
    def recover(self):
        """Roll back an interrupted transaction, if a journal exists.

        Returns True when a journal was found and rolled back (caches
        are invalidated), False when there was nothing to recover.
        """
        l = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"))
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            del l
533
533
    def rollback(self):
        """Undo the last completed transaction.

        Replays the store's undo journal, restores the saved dirstate
        from undo.dirstate, and invalidates both the repository and
        dirstate in-memory caches.  Takes wlock then lock for the
        duration.
        """
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                self.ui.status(_("rolling back last transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("undo"))
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                self.invalidate()
                self.dirstate.invalidate()
            else:
                self.ui.warn(_("no rollback information available\n"))
        finally:
            del lock, wlock
549
549
550 def invalidate(self):
550 def invalidate(self):
551 for a in "changelog manifest".split():
551 for a in "changelog manifest".split():
552 if hasattr(self, a):
552 if hasattr(self, a):
553 self.__delattr__(a)
553 self.__delattr__(a)
554 self.tagscache = None
554 self.tagscache = None
555 self.nodetagscache = None
555 self.nodetagscache = None
556
556
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        """Acquire the lock file *lockname* and return the lock object.

        A non-blocking acquire is tried first; if the lock is held and
        *wait* is true, we warn and retry with the configured ui.timeout
        (default 600 seconds), otherwise the LockHeld is re-raised.
        *acquirefn*, if given, runs after the lock is taken.
        """
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
571
571
572 def lock(self, wait=True):
572 def lock(self, wait=True):
573 if self._lockref and self._lockref():
573 if self._lockref and self._lockref():
574 return self._lockref()
574 return self._lockref()
575
575
576 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
576 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
577 _('repository %s') % self.origroot)
577 _('repository %s') % self.origroot)
578 self._lockref = weakref.ref(l)
578 self._lockref = weakref.ref(l)
579 return l
579 return l
580
580
581 def wlock(self, wait=True):
581 def wlock(self, wait=True):
582 if self._wlockref and self._wlockref():
582 if self._wlockref and self._wlockref():
583 return self._wlockref()
583 return self._wlockref()
584
584
585 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
585 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
586 self.dirstate.invalidate, _('working directory of %s') %
586 self.dirstate.invalidate, _('working directory of %s') %
587 self.origroot)
587 self.origroot)
588 self._wlockref = weakref.ref(l)
588 self._wlockref = weakref.ref(l)
589 return l
589 return l
590
590
    def filecommit(self, fn, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        fn: the tracked file name; manifest1/manifest2: the manifests of
        the two commit parents; linkrev: the changelog revision being
        created; tr: the open transaction; changelist: output list, fn is
        appended to it when a new file revision is actually written.
        Returns the node of the resulting file revision.
        """

        t = self.wread(fn)
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            elif fp1 != nullid: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = fp1
            else: # directory rename
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t):
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, tr, linkrev, fp1, fp2)
650
650
651 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
651 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
652 if p1 is None:
652 if p1 is None:
653 p1, p2 = self.dirstate.parents()
653 p1, p2 = self.dirstate.parents()
654 return self.commit(files=files, text=text, user=user, date=date,
654 return self.commit(files=files, text=text, user=user, date=date,
655 p1=p1, p2=p2, extra=extra, empty_ok=True)
655 p1=p1, p2=p2, extra=extra, empty_ok=True)
656
656
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, force_editor=False,
               p1=None, p2=None, extra={}, empty_ok=False):
        """Create a new changeset and return its node (None if nothing
        was committed).

        With p1 None (the normal case) the change list and parents come
        from the dirstate; otherwise this is a raw commit against the
        given parents.  files limits the commit to those paths; when
        empty it is computed via status().  force commits even when
        nothing changed; force_editor always opens the editor.  Fires
        the precommit, pretxncommit and commit hooks.
        """
        wlock = lock = tr = None
        try:
            commit = []
            remove = []
            changed = []
            use_dirstate = (p1 is None) # not rawcommit
            # don't mutate the caller's (possibly shared default) dict
            extra = extra.copy()

            if use_dirstate:
                if files:
                    for f in files:
                        s = self.dirstate[f]
                        if s in 'nma':
                            commit.append(f)
                        elif s == 'r':
                            remove.append(f)
                        else:
                            self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    changes = self.status(match=match)[:5]
                    modified, added, removed, deleted, unknown = changes
                    commit = modified + added
                    remove = removed
            else:
                commit = files

            if use_dirstate:
                p1, p2 = self.dirstate.parents()
                update_dirstate = True
            else:
                p1, p2 = p1, p2 or nullid
                update_dirstate = (self.dirstate.parents()[0] == p1)

            c1 = self.changelog.read(p1)
            c2 = self.changelog.read(p2)
            m1 = self.manifest.read(c1[0]).copy()
            m2 = self.manifest.read(c2[0])

            if use_dirstate:
                branchname = self.workingctx().branch()
                try:
                    # reject branch names that are not valid UTF-8
                    branchname = branchname.decode('UTF-8').encode('UTF-8')
                except UnicodeDecodeError:
                    raise util.Abort(_('branch name not in UTF-8!'))
            else:
                branchname = ""

            if use_dirstate:
                oldname = c1[5].get("branch") # stored in UTF-8
                if (not commit and not remove and not force and p2 == nullid
                    and branchname == oldname):
                    self.ui.status(_("nothing changed\n"))
                    return None

            xp1 = hex(p1)
            if p2 == nullid: xp2 = ''
            else: xp2 = hex(p2)

            self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

            wlock = self.wlock()
            lock = self.lock()
            tr = self.transaction()
            # a weak proxy keeps the filelogs from pinning the transaction
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            linkrev = self.changelog.count()
            commit.sort()
            is_exec = util.execfunc(self.root, m1.execf)
            is_link = util.linkfunc(self.root, m1.linkf)
            for f in commit:
                self.ui.note(f + "\n")
                try:
                    new[f] = self.filecommit(f, m1, m2, linkrev, trp, changed)
                    new_exec = is_exec(f)
                    new_link = is_link(f)
                    if not changed or changed[-1] != f:
                        # mention the file in the changelog if some
                        # flag changed, even if there was no content
                        # change.
                        old_exec = m1.execf(f)
                        old_link = m1.linkf(f)
                        if old_exec != new_exec or old_link != new_link:
                            changed.append(f)
                    m1.set(f, new_exec, new_link)
                except (OSError, IOError):
                    if use_dirstate:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        remove.append(f)

            # update manifest
            m1.update(new)
            remove.sort()
            removed = []

            for f in remove:
                if f in m1:
                    del m1[f]
                    removed.append(f)
                elif f in m2:
                    removed.append(f)
            mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
                                   (new, removed))

            # add changeset
            new = new.keys()
            new.sort()

            user = user or self.ui.username()
            if (not empty_ok and not text) or force_editor:
                edittext = []
                if text:
                    edittext.append(text)
                edittext.append("")
                edittext.append("HG: user: %s" % user)
                if p2 != nullid:
                    edittext.append("HG: branch merge")
                if branchname:
                    edittext.append("HG: branch %s" % util.tolocal(branchname))
                edittext.extend(["HG: changed %s" % f for f in changed])
                edittext.extend(["HG: removed %s" % f for f in removed])
                if not changed and not remove:
                    edittext.append("HG: no files changed")
                edittext.append("")
                # run editor in the repository root
                olddir = os.getcwd()
                os.chdir(self.root)
                text = self.ui.edit("\n".join(edittext), user)
                os.chdir(olddir)

            if branchname:
                extra["branch"] = branchname

            if use_dirstate:
                # normalize the message and abort on an effectively
                # empty one
                lines = [line.rstrip() for line in text.rstrip().splitlines()]
                while lines and not lines[0]:
                    del lines[0]
                if not lines:
                    return None
                text = '\n'.join(lines)

            n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
                                   user, date, extra)
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            tr.close()

            if self.branchcache and "branch" in extra:
                self.branchcache[util.tolocal(extra["branch"])] = n

            if use_dirstate or update_dirstate:
                self.dirstate.setparents(n)
                if use_dirstate:
                    for f in new:
                        self.dirstate.normal(f)
                    for f in removed:
                        self.dirstate.forget(f)

            self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
            return n
        finally:
            del tr, lock, wlock
825
825
    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function

        results are yielded in a tuple (src, filename), where src
        is one of:
        'f' the file was found in the directory tree
        'm' the file was only in the dirstate and not in the tree
        'b' file was not found and matched badmatch
        '''

        if node:
            # walking a committed revision: enumerate its manifest
            fdict = dict.fromkeys(files)
            # for dirstate.walk, files=['.'] means "walk the whole tree".
            # follow that here, too
            fdict.pop('.', None)
            mdict = self.manifest.read(self.changelog.read(node)[0])
            mfiles = mdict.keys()
            mfiles.sort()
            for fn in mfiles:
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        del fdict[ffn]
                        break
                if match(fn):
                    yield 'm', fn
            # anything left in fdict was requested but not in the manifest
            ffiles = fdict.keys()
            ffiles.sort()
            for fn in ffiles:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n')
                                 % (self.pathto(fn), short(node)))
        else:
            # walking the working directory: delegate to the dirstate
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn
867
867
    def status(self, node1=None, node2=None, files=[], match=util.always,
               list_ignored=False, list_clean=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a 7-tuple of sorted lists:
        (modified, added, removed, deleted, unknown, ignored, clean);
        ignored and clean are only populated when the corresponding
        list_* flag is set.
        """

        # compare the on-disk contents of fn against its node1 revision
        def fcmp(fn, getnode):
            t1 = self.wread(fn)
            return self.file(fn).cmp(getnode(fn), t1)

        # manifest of *node* restricted to files accepted by match
        def mfmatches(node):
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    fixup = []
                    # do a full compare of any files that might have changed
                    ctx = self.changectx()
                    for f in lookup:
                        if f not in ctx or ctx[f].cmp(self.wread(f)):
                            modified.append(f)
                        else:
                            fixup.append(f)
                            if list_clean:
                                clean.append(f)

                    # update dirstate for files that are actually clean
                    if fixup:
                        wlock = None
                        try:
                            try:
                                # best-effort: skip the fixup if we can't
                                # take the wlock without blocking
                                wlock = self.wlock(False)
                            except lock.LockException:
                                pass
                            if wlock:
                                for f in fixup:
                                    self.dirstate.normal(f)
                        finally:
                            del wlock
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                is_exec = util.execfunc(self.root, mf2.execf)
                is_link = util.linkfunc(self.root, mf2.linkf)
                for f in lookup + modified + added:
                    mf2[f] = ""
                    mf2.set(f, is_exec(f), is_link(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]

        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            getnode = lambda fn: mf1.get(fn, nullid)
            for fn in mf2keys:
                if mf1.has_key(fn):
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] != "" or fcmp(fn, getnode)))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            # whatever is left in mf1 exists only in node1
            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)
979
979
980 def add(self, list):
980 def add(self, list):
981 wlock = self.wlock()
981 wlock = self.wlock()
982 try:
982 try:
983 for f in list:
983 for f in list:
984 p = self.wjoin(f)
984 p = self.wjoin(f)
985 try:
985 try:
986 st = os.lstat(p)
986 st = os.lstat(p)
987 except:
987 except:
988 self.ui.warn(_("%s does not exist!\n") % f)
988 self.ui.warn(_("%s does not exist!\n") % f)
989 continue
989 continue
990 if st.st_size > 10000000:
990 if st.st_size > 10000000:
991 self.ui.warn(_("%s: files over 10MB may cause memory and"
991 self.ui.warn(_("%s: files over 10MB may cause memory and"
992 " performance problems\n"
992 " performance problems\n"
993 "(use 'hg revert %s' to unadd the file)\n")
993 "(use 'hg revert %s' to unadd the file)\n")
994 % (f, f))
994 % (f, f))
995 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
995 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
996 self.ui.warn(_("%s not added: only files and symlinks "
996 self.ui.warn(_("%s not added: only files and symlinks "
997 "supported currently\n") % f)
997 "supported currently\n") % f)
998 elif self.dirstate[f] in 'amn':
998 elif self.dirstate[f] in 'amn':
999 self.ui.warn(_("%s already tracked!\n") % f)
999 self.ui.warn(_("%s already tracked!\n") % f)
1000 elif self.dirstate[f] == 'r':
1000 elif self.dirstate[f] == 'r':
1001 self.dirstate.normaldirty(f)
1001 self.dirstate.normallookup(f)
1002 else:
1002 else:
1003 self.dirstate.add(f)
1003 self.dirstate.add(f)
1004 finally:
1004 finally:
1005 del wlock
1005 del wlock
1006
1006
1007 def forget(self, list):
1007 def forget(self, list):
1008 wlock = self.wlock()
1008 wlock = self.wlock()
1009 try:
1009 try:
1010 for f in list:
1010 for f in list:
1011 if self.dirstate[f] != 'a':
1011 if self.dirstate[f] != 'a':
1012 self.ui.warn(_("%s not added!\n") % f)
1012 self.ui.warn(_("%s not added!\n") % f)
1013 else:
1013 else:
1014 self.dirstate.forget(f)
1014 self.dirstate.forget(f)
1015 finally:
1015 finally:
1016 del wlock
1016 del wlock
1017
1017
1018 def remove(self, list, unlink=False):
1018 def remove(self, list, unlink=False):
1019 wlock = None
1019 wlock = None
1020 try:
1020 try:
1021 if unlink:
1021 if unlink:
1022 for f in list:
1022 for f in list:
1023 try:
1023 try:
1024 util.unlink(self.wjoin(f))
1024 util.unlink(self.wjoin(f))
1025 except OSError, inst:
1025 except OSError, inst:
1026 if inst.errno != errno.ENOENT:
1026 if inst.errno != errno.ENOENT:
1027 raise
1027 raise
1028 wlock = self.wlock()
1028 wlock = self.wlock()
1029 for f in list:
1029 for f in list:
1030 if unlink and os.path.exists(self.wjoin(f)):
1030 if unlink and os.path.exists(self.wjoin(f)):
1031 self.ui.warn(_("%s still exists!\n") % f)
1031 self.ui.warn(_("%s still exists!\n") % f)
1032 elif self.dirstate[f] == 'a':
1032 elif self.dirstate[f] == 'a':
1033 self.dirstate.forget(f)
1033 self.dirstate.forget(f)
1034 elif f not in self.dirstate:
1034 elif f not in self.dirstate:
1035 self.ui.warn(_("%s not tracked!\n") % f)
1035 self.ui.warn(_("%s not tracked!\n") % f)
1036 else:
1036 else:
1037 self.dirstate.remove(f)
1037 self.dirstate.remove(f)
1038 finally:
1038 finally:
1039 del wlock
1039 del wlock
1040
1040
1041 def undelete(self, list):
1041 def undelete(self, list):
1042 wlock = None
1042 wlock = None
1043 try:
1043 try:
1044 p = self.dirstate.parents()[0]
1044 p = self.dirstate.parents()[0]
1045 mn = self.changelog.read(p)[0]
1045 mn = self.changelog.read(p)[0]
1046 m = self.manifest.read(mn)
1046 m = self.manifest.read(mn)
1047 wlock = self.wlock()
1047 wlock = self.wlock()
1048 for f in list:
1048 for f in list:
1049 if self.dirstate[f] != 'r':
1049 if self.dirstate[f] != 'r':
1050 self.ui.warn("%s not removed!\n" % f)
1050 self.ui.warn("%s not removed!\n" % f)
1051 else:
1051 else:
1052 t = self.file(f).read(m[f])
1052 t = self.file(f).read(m[f])
1053 self.wwrite(f, t, m.flags(f))
1053 self.wwrite(f, t, m.flags(f))
1054 self.dirstate.normal(f)
1054 self.dirstate.normal(f)
1055 finally:
1055 finally:
1056 del wlock
1056 del wlock
1057
1057
1058 def copy(self, source, dest):
1058 def copy(self, source, dest):
1059 wlock = None
1059 wlock = None
1060 try:
1060 try:
1061 p = self.wjoin(dest)
1061 p = self.wjoin(dest)
1062 if not (os.path.exists(p) or os.path.islink(p)):
1062 if not (os.path.exists(p) or os.path.islink(p)):
1063 self.ui.warn(_("%s does not exist!\n") % dest)
1063 self.ui.warn(_("%s does not exist!\n") % dest)
1064 elif not (os.path.isfile(p) or os.path.islink(p)):
1064 elif not (os.path.isfile(p) or os.path.islink(p)):
1065 self.ui.warn(_("copy failed: %s is not a file or a "
1065 self.ui.warn(_("copy failed: %s is not a file or a "
1066 "symbolic link\n") % dest)
1066 "symbolic link\n") % dest)
1067 else:
1067 else:
1068 wlock = self.wlock()
1068 wlock = self.wlock()
1069 if dest not in self.dirstate:
1069 if dest not in self.dirstate:
1070 self.dirstate.add(dest)
1070 self.dirstate.add(dest)
1071 self.dirstate.copy(source, dest)
1071 self.dirstate.copy(source, dest)
1072 finally:
1072 finally:
1073 del wlock
1073 del wlock
1074
1074
1075 def heads(self, start=None):
1075 def heads(self, start=None):
1076 heads = self.changelog.heads(start)
1076 heads = self.changelog.heads(start)
1077 # sort the output in rev descending order
1077 # sort the output in rev descending order
1078 heads = [(-self.changelog.rev(h), h) for h in heads]
1078 heads = [(-self.changelog.rev(h), h) for h in heads]
1079 heads.sort()
1079 heads.sort()
1080 return [n for (r, n) in heads]
1080 return [n for (r, n) in heads]
1081
1081
    def branchheads(self, branch, start=None):
        """Return the head nodes of *branch*, optionally limited to
        descendants of *start*."""
        branches = self.branchtags()
        if branch not in branches:
            return []
        # The basic algorithm is this:
        #
        # Start from the branch tip since there are no later revisions that can
        # possibly be in this branch, and the tip is a guaranteed head.
        #
        # Remember the tip's parents as the first ancestors, since these by
        # definition are not heads.
        #
        # Step backwards from the branch tip through all the revisions. We are
        # guaranteed by the rules of Mercurial that we will now be visiting the
        # nodes in reverse topological order (children before parents).
        #
        # If a revision is one of the ancestors of a head then we can toss it
        # out of the ancestors set (we've already found it and won't be
        # visiting it again) and put its parents in the ancestors set.
        #
        # Otherwise, if a revision is in the branch it's another head, since it
        # wasn't in the ancestor list of an existing head. So add it to the
        # head list, and add its parents to the ancestor list.
        #
        # If it is not in the branch ignore it.
        #
        # Once we have a list of heads, use nodesbetween to filter out all the
        # heads that cannot be reached from startrev. There may be a more
        # efficient way to do this as part of the previous algorithm.

        set = util.set
        heads = [self.changelog.rev(branches[branch])]
        # Don't care if ancestors contains nullrev or not.
        ancestors = set(self.changelog.parentrevs(heads[0]))
        # walk every rev below the branch tip, newest first
        for rev in xrange(heads[0] - 1, nullrev, -1):
            if rev in ancestors:
                # already reachable from a known head: not a head itself
                ancestors.update(self.changelog.parentrevs(rev))
                ancestors.remove(rev)
            elif self.changectx(rev).branch() == branch:
                # in the branch but not an ancestor of any head: new head
                heads.append(rev)
                ancestors.update(self.changelog.parentrevs(rev))
        heads = [self.changelog.node(rev) for rev in heads]
        if start is not None:
            # keep only the heads reachable from start
            heads = self.changelog.nodesbetween([start], heads)[2]
        return heads
1127
1127
1128 def branches(self, nodes):
1128 def branches(self, nodes):
1129 if not nodes:
1129 if not nodes:
1130 nodes = [self.changelog.tip()]
1130 nodes = [self.changelog.tip()]
1131 b = []
1131 b = []
1132 for n in nodes:
1132 for n in nodes:
1133 t = n
1133 t = n
1134 while 1:
1134 while 1:
1135 p = self.changelog.parents(n)
1135 p = self.changelog.parents(n)
1136 if p[1] != nullid or p[0] == nullid:
1136 if p[1] != nullid or p[0] == nullid:
1137 b.append((t, n, p[0], p[1]))
1137 b.append((t, n, p[0], p[1]))
1138 break
1138 break
1139 n = p[0]
1139 n = p[0]
1140 return b
1140 return b
1141
1141
1142 def between(self, pairs):
1142 def between(self, pairs):
1143 r = []
1143 r = []
1144
1144
1145 for top, bottom in pairs:
1145 for top, bottom in pairs:
1146 n, l, i = top, [], 0
1146 n, l, i = top, [], 0
1147 f = 1
1147 f = 1
1148
1148
1149 while n != bottom:
1149 while n != bottom:
1150 p = self.changelog.parents(n)[0]
1150 p = self.changelog.parents(n)[0]
1151 if i == f:
1151 if i == f:
1152 l.append(n)
1152 l.append(n)
1153 f = f * 2
1153 f = f * 2
1154 n = p
1154 n = p
1155 i += 1
1155 i += 1
1156
1156
1157 r.append(l)
1157 r.append(l)
1158
1158
1159 return r
1159 return r
1160
1160
1161 def findincoming(self, remote, base=None, heads=None, force=False):
1161 def findincoming(self, remote, base=None, heads=None, force=False):
1162 """Return list of roots of the subsets of missing nodes from remote
1162 """Return list of roots of the subsets of missing nodes from remote
1163
1163
1164 If base dict is specified, assume that these nodes and their parents
1164 If base dict is specified, assume that these nodes and their parents
1165 exist on the remote side and that no child of a node of base exists
1165 exist on the remote side and that no child of a node of base exists
1166 in both remote and self.
1166 in both remote and self.
1167 Furthermore base will be updated to include the nodes that exists
1167 Furthermore base will be updated to include the nodes that exists
1168 in self and remote but no children exists in self and remote.
1168 in self and remote but no children exists in self and remote.
1169 If a list of heads is specified, return only nodes which are heads
1169 If a list of heads is specified, return only nodes which are heads
1170 or ancestors of these heads.
1170 or ancestors of these heads.
1171
1171
1172 All the ancestors of base are in self and in remote.
1172 All the ancestors of base are in self and in remote.
1173 All the descendants of the list returned are missing in self.
1173 All the descendants of the list returned are missing in self.
1174 (and so we know that the rest of the nodes are missing in remote, see
1174 (and so we know that the rest of the nodes are missing in remote, see
1175 outgoing)
1175 outgoing)
1176 """
1176 """
1177 m = self.changelog.nodemap
1177 m = self.changelog.nodemap
1178 search = []
1178 search = []
1179 fetch = {}
1179 fetch = {}
1180 seen = {}
1180 seen = {}
1181 seenbranch = {}
1181 seenbranch = {}
1182 if base == None:
1182 if base == None:
1183 base = {}
1183 base = {}
1184
1184
1185 if not heads:
1185 if not heads:
1186 heads = remote.heads()
1186 heads = remote.heads()
1187
1187
1188 if self.changelog.tip() == nullid:
1188 if self.changelog.tip() == nullid:
1189 base[nullid] = 1
1189 base[nullid] = 1
1190 if heads != [nullid]:
1190 if heads != [nullid]:
1191 return [nullid]
1191 return [nullid]
1192 return []
1192 return []
1193
1193
1194 # assume we're closer to the tip than the root
1194 # assume we're closer to the tip than the root
1195 # and start by examining the heads
1195 # and start by examining the heads
1196 self.ui.status(_("searching for changes\n"))
1196 self.ui.status(_("searching for changes\n"))
1197
1197
1198 unknown = []
1198 unknown = []
1199 for h in heads:
1199 for h in heads:
1200 if h not in m:
1200 if h not in m:
1201 unknown.append(h)
1201 unknown.append(h)
1202 else:
1202 else:
1203 base[h] = 1
1203 base[h] = 1
1204
1204
1205 if not unknown:
1205 if not unknown:
1206 return []
1206 return []
1207
1207
1208 req = dict.fromkeys(unknown)
1208 req = dict.fromkeys(unknown)
1209 reqcnt = 0
1209 reqcnt = 0
1210
1210
1211 # search through remote branches
1211 # search through remote branches
1212 # a 'branch' here is a linear segment of history, with four parts:
1212 # a 'branch' here is a linear segment of history, with four parts:
1213 # head, root, first parent, second parent
1213 # head, root, first parent, second parent
1214 # (a branch always has two parents (or none) by definition)
1214 # (a branch always has two parents (or none) by definition)
1215 unknown = remote.branches(unknown)
1215 unknown = remote.branches(unknown)
1216 while unknown:
1216 while unknown:
1217 r = []
1217 r = []
1218 while unknown:
1218 while unknown:
1219 n = unknown.pop(0)
1219 n = unknown.pop(0)
1220 if n[0] in seen:
1220 if n[0] in seen:
1221 continue
1221 continue
1222
1222
1223 self.ui.debug(_("examining %s:%s\n")
1223 self.ui.debug(_("examining %s:%s\n")
1224 % (short(n[0]), short(n[1])))
1224 % (short(n[0]), short(n[1])))
1225 if n[0] == nullid: # found the end of the branch
1225 if n[0] == nullid: # found the end of the branch
1226 pass
1226 pass
1227 elif n in seenbranch:
1227 elif n in seenbranch:
1228 self.ui.debug(_("branch already found\n"))
1228 self.ui.debug(_("branch already found\n"))
1229 continue
1229 continue
1230 elif n[1] and n[1] in m: # do we know the base?
1230 elif n[1] and n[1] in m: # do we know the base?
1231 self.ui.debug(_("found incomplete branch %s:%s\n")
1231 self.ui.debug(_("found incomplete branch %s:%s\n")
1232 % (short(n[0]), short(n[1])))
1232 % (short(n[0]), short(n[1])))
1233 search.append(n) # schedule branch range for scanning
1233 search.append(n) # schedule branch range for scanning
1234 seenbranch[n] = 1
1234 seenbranch[n] = 1
1235 else:
1235 else:
1236 if n[1] not in seen and n[1] not in fetch:
1236 if n[1] not in seen and n[1] not in fetch:
1237 if n[2] in m and n[3] in m:
1237 if n[2] in m and n[3] in m:
1238 self.ui.debug(_("found new changeset %s\n") %
1238 self.ui.debug(_("found new changeset %s\n") %
1239 short(n[1]))
1239 short(n[1]))
1240 fetch[n[1]] = 1 # earliest unknown
1240 fetch[n[1]] = 1 # earliest unknown
1241 for p in n[2:4]:
1241 for p in n[2:4]:
1242 if p in m:
1242 if p in m:
1243 base[p] = 1 # latest known
1243 base[p] = 1 # latest known
1244
1244
1245 for p in n[2:4]:
1245 for p in n[2:4]:
1246 if p not in req and p not in m:
1246 if p not in req and p not in m:
1247 r.append(p)
1247 r.append(p)
1248 req[p] = 1
1248 req[p] = 1
1249 seen[n[0]] = 1
1249 seen[n[0]] = 1
1250
1250
1251 if r:
1251 if r:
1252 reqcnt += 1
1252 reqcnt += 1
1253 self.ui.debug(_("request %d: %s\n") %
1253 self.ui.debug(_("request %d: %s\n") %
1254 (reqcnt, " ".join(map(short, r))))
1254 (reqcnt, " ".join(map(short, r))))
1255 for p in xrange(0, len(r), 10):
1255 for p in xrange(0, len(r), 10):
1256 for b in remote.branches(r[p:p+10]):
1256 for b in remote.branches(r[p:p+10]):
1257 self.ui.debug(_("received %s:%s\n") %
1257 self.ui.debug(_("received %s:%s\n") %
1258 (short(b[0]), short(b[1])))
1258 (short(b[0]), short(b[1])))
1259 unknown.append(b)
1259 unknown.append(b)
1260
1260
1261 # do binary search on the branches we found
1261 # do binary search on the branches we found
1262 while search:
1262 while search:
1263 n = search.pop(0)
1263 n = search.pop(0)
1264 reqcnt += 1
1264 reqcnt += 1
1265 l = remote.between([(n[0], n[1])])[0]
1265 l = remote.between([(n[0], n[1])])[0]
1266 l.append(n[1])
1266 l.append(n[1])
1267 p = n[0]
1267 p = n[0]
1268 f = 1
1268 f = 1
1269 for i in l:
1269 for i in l:
1270 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1270 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1271 if i in m:
1271 if i in m:
1272 if f <= 2:
1272 if f <= 2:
1273 self.ui.debug(_("found new branch changeset %s\n") %
1273 self.ui.debug(_("found new branch changeset %s\n") %
1274 short(p))
1274 short(p))
1275 fetch[p] = 1
1275 fetch[p] = 1
1276 base[i] = 1
1276 base[i] = 1
1277 else:
1277 else:
1278 self.ui.debug(_("narrowed branch search to %s:%s\n")
1278 self.ui.debug(_("narrowed branch search to %s:%s\n")
1279 % (short(p), short(i)))
1279 % (short(p), short(i)))
1280 search.append((p, i))
1280 search.append((p, i))
1281 break
1281 break
1282 p, f = i, f * 2
1282 p, f = i, f * 2
1283
1283
1284 # sanity check our fetch list
1284 # sanity check our fetch list
1285 for f in fetch.keys():
1285 for f in fetch.keys():
1286 if f in m:
1286 if f in m:
1287 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1287 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1288
1288
1289 if base.keys() == [nullid]:
1289 if base.keys() == [nullid]:
1290 if force:
1290 if force:
1291 self.ui.warn(_("warning: repository is unrelated\n"))
1291 self.ui.warn(_("warning: repository is unrelated\n"))
1292 else:
1292 else:
1293 raise util.Abort(_("repository is unrelated"))
1293 raise util.Abort(_("repository is unrelated"))
1294
1294
1295 self.ui.debug(_("found new changesets starting at ") +
1295 self.ui.debug(_("found new changesets starting at ") +
1296 " ".join([short(f) for f in fetch]) + "\n")
1296 " ".join([short(f) for f in fetch]) + "\n")
1297
1297
1298 self.ui.debug(_("%d total queries\n") % reqcnt)
1298 self.ui.debug(_("%d total queries\n") % reqcnt)
1299
1299
1300 return fetch.keys()
1300 return fetch.keys()
1301
1301
1302 def findoutgoing(self, remote, base=None, heads=None, force=False):
1302 def findoutgoing(self, remote, base=None, heads=None, force=False):
1303 """Return list of nodes that are roots of subsets not in remote
1303 """Return list of nodes that are roots of subsets not in remote
1304
1304
1305 If base dict is specified, assume that these nodes and their parents
1305 If base dict is specified, assume that these nodes and their parents
1306 exist on the remote side.
1306 exist on the remote side.
1307 If a list of heads is specified, return only nodes which are heads
1307 If a list of heads is specified, return only nodes which are heads
1308 or ancestors of these heads, and return a second element which
1308 or ancestors of these heads, and return a second element which
1309 contains all remote heads which get new children.
1309 contains all remote heads which get new children.
1310 """
1310 """
1311 if base == None:
1311 if base == None:
1312 base = {}
1312 base = {}
1313 self.findincoming(remote, base, heads, force=force)
1313 self.findincoming(remote, base, heads, force=force)
1314
1314
1315 self.ui.debug(_("common changesets up to ")
1315 self.ui.debug(_("common changesets up to ")
1316 + " ".join(map(short, base.keys())) + "\n")
1316 + " ".join(map(short, base.keys())) + "\n")
1317
1317
1318 remain = dict.fromkeys(self.changelog.nodemap)
1318 remain = dict.fromkeys(self.changelog.nodemap)
1319
1319
1320 # prune everything remote has from the tree
1320 # prune everything remote has from the tree
1321 del remain[nullid]
1321 del remain[nullid]
1322 remove = base.keys()
1322 remove = base.keys()
1323 while remove:
1323 while remove:
1324 n = remove.pop(0)
1324 n = remove.pop(0)
1325 if n in remain:
1325 if n in remain:
1326 del remain[n]
1326 del remain[n]
1327 for p in self.changelog.parents(n):
1327 for p in self.changelog.parents(n):
1328 remove.append(p)
1328 remove.append(p)
1329
1329
1330 # find every node whose parents have been pruned
1330 # find every node whose parents have been pruned
1331 subset = []
1331 subset = []
1332 # find every remote head that will get new children
1332 # find every remote head that will get new children
1333 updated_heads = {}
1333 updated_heads = {}
1334 for n in remain:
1334 for n in remain:
1335 p1, p2 = self.changelog.parents(n)
1335 p1, p2 = self.changelog.parents(n)
1336 if p1 not in remain and p2 not in remain:
1336 if p1 not in remain and p2 not in remain:
1337 subset.append(n)
1337 subset.append(n)
1338 if heads:
1338 if heads:
1339 if p1 in heads:
1339 if p1 in heads:
1340 updated_heads[p1] = True
1340 updated_heads[p1] = True
1341 if p2 in heads:
1341 if p2 in heads:
1342 updated_heads[p2] = True
1342 updated_heads[p2] = True
1343
1343
1344 # this is the set of all roots we have to push
1344 # this is the set of all roots we have to push
1345 if heads:
1345 if heads:
1346 return subset, updated_heads.keys()
1346 return subset, updated_heads.keys()
1347 else:
1347 else:
1348 return subset
1348 return subset
1349
1349
1350 def pull(self, remote, heads=None, force=False):
1350 def pull(self, remote, heads=None, force=False):
1351 lock = self.lock()
1351 lock = self.lock()
1352 try:
1352 try:
1353 fetch = self.findincoming(remote, force=force)
1353 fetch = self.findincoming(remote, force=force)
1354 if fetch == [nullid]:
1354 if fetch == [nullid]:
1355 self.ui.status(_("requesting all changes\n"))
1355 self.ui.status(_("requesting all changes\n"))
1356
1356
1357 if not fetch:
1357 if not fetch:
1358 self.ui.status(_("no changes found\n"))
1358 self.ui.status(_("no changes found\n"))
1359 return 0
1359 return 0
1360
1360
1361 if heads is None:
1361 if heads is None:
1362 cg = remote.changegroup(fetch, 'pull')
1362 cg = remote.changegroup(fetch, 'pull')
1363 else:
1363 else:
1364 if 'changegroupsubset' not in remote.capabilities:
1364 if 'changegroupsubset' not in remote.capabilities:
1365 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1365 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1366 cg = remote.changegroupsubset(fetch, heads, 'pull')
1366 cg = remote.changegroupsubset(fetch, heads, 'pull')
1367 return self.addchangegroup(cg, 'pull', remote.url())
1367 return self.addchangegroup(cg, 'pull', remote.url())
1368 finally:
1368 finally:
1369 del lock
1369 del lock
1370
1370
1371 def push(self, remote, force=False, revs=None):
1371 def push(self, remote, force=False, revs=None):
1372 # there are two ways to push to remote repo:
1372 # there are two ways to push to remote repo:
1373 #
1373 #
1374 # addchangegroup assumes local user can lock remote
1374 # addchangegroup assumes local user can lock remote
1375 # repo (local filesystem, old ssh servers).
1375 # repo (local filesystem, old ssh servers).
1376 #
1376 #
1377 # unbundle assumes local user cannot lock remote repo (new ssh
1377 # unbundle assumes local user cannot lock remote repo (new ssh
1378 # servers, http servers).
1378 # servers, http servers).
1379
1379
1380 if remote.capable('unbundle'):
1380 if remote.capable('unbundle'):
1381 return self.push_unbundle(remote, force, revs)
1381 return self.push_unbundle(remote, force, revs)
1382 return self.push_addchangegroup(remote, force, revs)
1382 return self.push_addchangegroup(remote, force, revs)
1383
1383
    def prepush(self, remote, force, revs):
        """Compute the changegroup to push to *remote*.

        Returns (changegroup, remote_heads) on success, or (None, 1)
        when there is nothing to push or the push would create new
        remote heads and force was not given.
        """
        base = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            # ancestral to an outgoing head
            warn = 0

            if remote_heads == [nullid]:
                # remote is empty, nothing to check
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        # remote head known locally: it stays a head only
                        # if no outgoing head descends from it
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.append(r)
                    else:
                        # remote head unknown locally: it remains a head
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1
            elif inc:
                self.ui.warn(_("note: unsynced remote changes!\n"))


        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
1439
1439
1440 def push_addchangegroup(self, remote, force, revs):
1440 def push_addchangegroup(self, remote, force, revs):
1441 lock = remote.lock()
1441 lock = remote.lock()
1442 try:
1442 try:
1443 ret = self.prepush(remote, force, revs)
1443 ret = self.prepush(remote, force, revs)
1444 if ret[0] is not None:
1444 if ret[0] is not None:
1445 cg, remote_heads = ret
1445 cg, remote_heads = ret
1446 return remote.addchangegroup(cg, 'push', self.url())
1446 return remote.addchangegroup(cg, 'push', self.url())
1447 return ret[1]
1447 return ret[1]
1448 finally:
1448 finally:
1449 del lock
1449 del lock
1450
1450
1451 def push_unbundle(self, remote, force, revs):
1451 def push_unbundle(self, remote, force, revs):
1452 # local repo finds heads on server, finds out what revs it
1452 # local repo finds heads on server, finds out what revs it
1453 # must push. once revs transferred, if server finds it has
1453 # must push. once revs transferred, if server finds it has
1454 # different heads (someone else won commit/push race), server
1454 # different heads (someone else won commit/push race), server
1455 # aborts.
1455 # aborts.
1456
1456
1457 ret = self.prepush(remote, force, revs)
1457 ret = self.prepush(remote, force, revs)
1458 if ret[0] is not None:
1458 if ret[0] is not None:
1459 cg, remote_heads = ret
1459 cg, remote_heads = ret
1460 if force: remote_heads = ['force']
1460 if force: remote_heads = ['force']
1461 return remote.unbundle(cg, remote_heads, 'push')
1461 return remote.unbundle(cg, remote_heads, 'push')
1462 return ret[1]
1462 return ret[1]
1463
1463
1464 def changegroupinfo(self, nodes):
1464 def changegroupinfo(self, nodes):
1465 self.ui.note(_("%d changesets found\n") % len(nodes))
1465 self.ui.note(_("%d changesets found\n") % len(nodes))
1466 if self.ui.debugflag:
1466 if self.ui.debugflag:
1467 self.ui.debug(_("List of changesets:\n"))
1467 self.ui.debug(_("List of changesets:\n"))
1468 for node in nodes:
1468 for node in nodes:
1469 self.ui.debug("%s\n" % hex(node))
1469 self.ui.debug("%s\n" % hex(node))
1470
1470
def changegroupsubset(self, bases, heads, source):
    """This function generates a changegroup consisting of all the nodes
    that are descendents of any of the bases, and ancestors of any of
    the heads.

    It is fairly complex as determining which filenodes and which
    manifest nodes need to be included for the changeset to be complete
    is non-trivial.

    Another wrinkle is doing the reverse, figuring out which changeset in
    the changegroup a particular filenode or manifestnode belongs to.

    Returns a util.chunkbuffer wrapping the gengroup() generator below.
    """

    # give extensions a chance to veto/observe before anything is computed
    self.hook('preoutgoing', throw=True, source=source)

    # Set up some initial variables
    # Make it easy to refer to self.changelog
    cl = self.changelog
    # msng is short for missing - compute the list of changesets in this
    # changegroup.
    msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
    self.changegroupinfo(msng_cl_lst)
    # Some bases may turn out to be superfluous, and some heads may be
    # too.  nodesbetween will return the minimal set of bases and heads
    # necessary to re-create the changegroup.

    # Known heads are the list of heads that it is assumed the recipient
    # of this changegroup will know about.
    knownheads = {}
    # We assume that all parents of bases are known heads.
    for n in bases:
        for p in cl.parents(n):
            if p != nullid:
                knownheads[p] = 1
    knownheads = knownheads.keys()
    if knownheads:
        # Now that we know what heads are known, we can compute which
        # changesets are known.  The recipient must know about all
        # changesets required to reach the known heads from the null
        # changeset.
        has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
        junk = None
        # Transform the list into an ersatz set (dict used as a set:
        # this file predates the set builtin).
        has_cl_set = dict.fromkeys(has_cl_set)
    else:
        # If there were no known heads, the recipient cannot be assumed to
        # know about any changesets.
        has_cl_set = {}

    # Make it easy to refer to self.manifest
    mnfst = self.manifest
    # We don't know which manifests are missing yet
    msng_mnfst_set = {}
    # Nor do we know which filenodes are missing.
    msng_filenode_set = {}

    # force the manifest index to be fully loaded
    junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
    junk = None

    # A changeset always belongs to itself, so the changenode lookup
    # function for a changenode is identity.
    def identity(x):
        return x

    # A function generating function.  Sets up an environment for the
    # inner function.
    def cmp_by_rev_func(revlog):
        # Compare two nodes by their revision number in the environment's
        # revision history.  Since the revision number both represents the
        # most efficient order to read the nodes in, and represents a
        # topological sorting of the nodes, this function is often useful.
        def cmp_by_rev(a, b):
            return cmp(revlog.rev(a), revlog.rev(b))
        return cmp_by_rev

    # If we determine that a particular file or manifest node must be a
    # node that the recipient of the changegroup will already have, we can
    # also assume the recipient will have all the parents.  This function
    # prunes them from the set of missing nodes.
    def prune_parents(revlog, hasset, msngset):
        haslst = hasset.keys()
        haslst.sort(cmp_by_rev_func(revlog))
        for node in haslst:
            parentlst = [p for p in revlog.parents(node) if p != nullid]
            while parentlst:
                n = parentlst.pop()
                if n not in hasset:
                    hasset[n] = 1
                    p = [p for p in revlog.parents(n) if p != nullid]
                    parentlst.extend(p)
        for n in hasset:
            msngset.pop(n, None)

    # This is a function generating function used to set up an environment
    # for the inner function to execute in.
    def manifest_and_file_collector(changedfileset):
        # This is an information gathering function that gathers
        # information from each changeset node that goes out as part of
        # the changegroup.  The information gathered is a list of which
        # manifest nodes are potentially required (the recipient may
        # already have them) and total list of all files which were
        # changed in any changeset in the changegroup.
        #
        # We also remember the first changenode we saw any manifest
        # referenced by so we can later determine which changenode 'owns'
        # the manifest.
        def collect_manifests_and_files(clnode):
            c = cl.read(clnode)
            for f in c[3]:
                # This is to make sure we only have one instance of each
                # filename string for each filename.
                changedfileset.setdefault(f, f)
            msng_mnfst_set.setdefault(c[0], clnode)
        return collect_manifests_and_files

    # Figure out which manifest nodes (of the ones we think might be part
    # of the changegroup) the recipient must know about and remove them
    # from the changegroup.
    def prune_manifests():
        has_mnfst_set = {}
        for n in msng_mnfst_set:
            # If a 'missing' manifest thinks it belongs to a changenode
            # the recipient is assumed to have, obviously the recipient
            # must have that manifest.
            linknode = cl.node(mnfst.linkrev(n))
            if linknode in has_cl_set:
                has_mnfst_set[n] = 1
        prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

    # Use the information collected in collect_manifests_and_files to say
    # which changenode any manifestnode belongs to.
    def lookup_manifest_link(mnfstnode):
        return msng_mnfst_set[mnfstnode]

    # A function generating function that sets up the initial environment
    # the inner function.
    def filenode_collector(changedfiles):
        # one-element list used as a mutable cell (closures cannot rebind
        # outer locals before Python 3's "nonlocal")
        next_rev = [0]
        # This gathers information from each manifestnode included in the
        # changegroup about which filenodes the manifest node references
        # so we can include those in the changegroup too.
        #
        # It also remembers which changenode each filenode belongs to.  It
        # does this by assuming the a filenode belongs to the changenode
        # the first manifest that references it belongs to.
        def collect_msng_filenodes(mnfstnode):
            r = mnfst.rev(mnfstnode)
            if r == next_rev[0]:
                # If the last rev we looked at was the one just previous,
                # we only need to see a diff.
                deltamf = mnfst.readdelta(mnfstnode)
                # For each line in the delta
                for f, fnode in deltamf.items():
                    f = changedfiles.get(f, None)
                    # And if the file is in the list of files we care
                    # about.
                    if f is not None:
                        # Get the changenode this manifest belongs to
                        clnode = msng_mnfst_set[mnfstnode]
                        # Create the set of filenodes for the file if
                        # there isn't one already.
                        ndset = msng_filenode_set.setdefault(f, {})
                        # And set the filenode's changelog node to the
                        # manifest's if it hasn't been set already.
                        ndset.setdefault(fnode, clnode)
            else:
                # Otherwise we need a full manifest.
                m = mnfst.read(mnfstnode)
                # For every file in we care about.
                for f in changedfiles:
                    fnode = m.get(f, None)
                    # If it's in the manifest
                    if fnode is not None:
                        # See comments above.
                        clnode = msng_mnfst_set[mnfstnode]
                        ndset = msng_filenode_set.setdefault(f, {})
                        ndset.setdefault(fnode, clnode)
            # Remember the revision we hope to see next.
            next_rev[0] = r + 1
        return collect_msng_filenodes

    # We have a list of filenodes we think we need for a file, lets remove
    # all those we now the recipient must have.
    def prune_filenodes(f, filerevlog):
        msngset = msng_filenode_set[f]
        hasset = {}
        # If a 'missing' filenode thinks it belongs to a changenode we
        # assume the recipient must have, then the recipient must have
        # that filenode.
        for n in msngset:
            clnode = cl.node(filerevlog.linkrev(n))
            if clnode in has_cl_set:
                hasset[n] = 1
        prune_parents(filerevlog, hasset, msngset)

    # A function generator function that sets up the a context for the
    # inner function.
    def lookup_filenode_link_func(fname):
        msngset = msng_filenode_set[fname]
        # Lookup the changenode the filenode belongs to.
        def lookup_filenode_link(fnode):
            return msngset[fnode]
        return lookup_filenode_link

    # Now that we have all theses utility functions to help out and
    # logically divide up the task, generate the group.
    # NOTE: emission order is part of the wire format: changelog chunks,
    # then manifest chunks, then per-file groups, then a close chunk.
    def gengroup():
        # The set of changed files starts empty.
        changedfiles = {}
        # Create a changenode group generator that will call our functions
        # back to lookup the owning changenode and collect information.
        group = cl.group(msng_cl_lst, identity,
                         manifest_and_file_collector(changedfiles))
        for chnk in group:
            yield chnk

        # The list of manifests has been collected by the generator
        # calling our functions back.
        prune_manifests()
        msng_mnfst_lst = msng_mnfst_set.keys()
        # Sort the manifestnodes by revision number.
        msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
        # Create a generator for the manifestnodes that calls our lookup
        # and data collection functions back.
        group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                            filenode_collector(changedfiles))
        for chnk in group:
            yield chnk

        # These are no longer needed, dereference and toss the memory for
        # them.
        msng_mnfst_lst = None
        msng_mnfst_set.clear()

        changedfiles = changedfiles.keys()
        changedfiles.sort()
        # Go through all our files in order sorted by name.
        for fname in changedfiles:
            filerevlog = self.file(fname)
            # Toss out the filenodes that the recipient isn't really
            # missing.
            if msng_filenode_set.has_key(fname):
                prune_filenodes(fname, filerevlog)
                msng_filenode_lst = msng_filenode_set[fname].keys()
            else:
                msng_filenode_lst = []
            # If any filenodes are left, generate the group for them,
            # otherwise don't bother.
            if len(msng_filenode_lst) > 0:
                yield changegroup.genchunk(fname)
                # Sort the filenodes by their revision #
                msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                # Create a group generator and only pass in a changenode
                # lookup function as we need to collect no information
                # from filenodes.
                group = filerevlog.group(msng_filenode_lst,
                                         lookup_filenode_link_func(fname))
                for chnk in group:
                    yield chnk
            if msng_filenode_set.has_key(fname):
                # Don't need this anymore, toss it to free memory.
                del msng_filenode_set[fname]
        # Signal that no more groups are left.
        yield changegroup.closechunk()

    if msng_cl_lst:
        self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

    return util.chunkbuffer(gengroup())
1739
1739
def changegroup(self, basenodes, source):
    """Generate a changegroup of all nodes that we have that a recipient
    doesn't.

    This is much easier than the previous function as we can assume that
    the recipient has any changenode we aren't sending them.

    basenodes: nodes the recipient already has (their descendants that we
    have are sent); source: tag passed to the preoutgoing/outgoing hooks.
    Returns a util.chunkbuffer wrapping the gengroup() generator below.
    """

    self.hook('preoutgoing', throw=True, source=source)

    cl = self.changelog
    # everything we have that descends from any of basenodes is outgoing
    nodes = cl.nodesbetween(basenodes, None)[0]
    # ersatz set of the outgoing changelog revision numbers, used to
    # filter manifest/file revlog entries by their linkrev below
    revset = dict.fromkeys([cl.rev(n) for n in nodes])
    self.changegroupinfo(nodes)

    # changesets are their own lookup key
    def identity(x):
        return x

    # yield, in revision order, the nodes of revlog whose linkrev is one
    # of the outgoing changesets
    def gennodelst(revlog):
        for r in xrange(0, revlog.count()):
            n = revlog.node(r)
            if revlog.linkrev(n) in revset:
                yield n

    # closure factory: records, as a side effect of the changelog group
    # generation, every filename touched by an outgoing changeset
    def changed_file_collector(changedfileset):
        def collect_changed_files(clnode):
            c = cl.read(clnode)
            for fname in c[3]:
                changedfileset[fname] = 1
        return collect_changed_files

    # closure factory: maps a revlog node to the changelog node it was
    # introduced by (via linkrev)
    def lookuprevlink_func(revlog):
        def lookuprevlink(n):
            return cl.node(revlog.linkrev(n))
        return lookuprevlink

    # NOTE: emission order is part of the wire format: changelog chunks,
    # then manifest chunks, then per-file groups, then a close chunk.
    def gengroup():
        # construct a list of all changed files
        changedfiles = {}

        for chnk in cl.group(nodes, identity,
                             changed_file_collector(changedfiles)):
            yield chnk
        changedfiles = changedfiles.keys()
        changedfiles.sort()

        mnfst = self.manifest
        nodeiter = gennodelst(mnfst)
        for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
            yield chnk

        for fname in changedfiles:
            filerevlog = self.file(fname)
            nodeiter = gennodelst(filerevlog)
            # materialize so we can skip the filename chunk entirely when
            # this file has no outgoing revisions
            nodeiter = list(nodeiter)
            if nodeiter:
                yield changegroup.genchunk(fname)
                lookup = lookuprevlink_func(filerevlog)
                for chnk in filerevlog.group(nodeiter, lookup):
                    yield chnk

        yield changegroup.closechunk()

    if nodes:
        self.hook('outgoing', node=hex(nodes[0]), source=source)

    return util.chunkbuffer(gengroup())
1806
1806
def addchangegroup(self, source, srctype, url):
    """add changegroup to repo.

    source: chunk stream to read the changegroup from
    srctype/url: origin information passed through to the hooks

    return values:
    - nothing changed or no source: 0
    - more heads than before: 1+added heads (2..n)
    - less heads than before: -1-removed heads (-2..-n)
    - number of heads stays the same: 1
    """
    # chunk-index callback for the changelog: report progress and return
    # the next revision number
    def csmap(x):
        self.ui.debug(_("add changeset %s\n") % short(x))
        return cl.count()

    # chunk-index callback for manifests/files: map node to changelog rev
    def revmap(x):
        return cl.rev(x)

    if not source:
        return 0

    self.hook('prechangegroup', throw=True, source=srctype, url=url)

    changesets = files = revisions = 0

    # write changelog data to temp files so concurrent readers will not see
    # inconsistent view
    cl = self.changelog
    cl.delayupdate()
    oldheads = len(cl.heads())

    tr = self.transaction()
    try:
        # weak proxy so the transaction can be garbage collected by the
        # `del tr` in the finally clause even while addgroup holds it
        trp = weakref.proxy(tr)
        # pull off the changeset group
        self.ui.status(_("adding changesets\n"))
        cor = cl.count() - 1
        chunkiter = changegroup.chunkiter(source)
        if cl.addgroup(chunkiter, csmap, trp, 1) is None:
            raise util.Abort(_("received changelog group is empty"))
        cnr = cl.count() - 1
        changesets = cnr - cor

        # pull off the manifest group
        self.ui.status(_("adding manifests\n"))
        chunkiter = changegroup.chunkiter(source)
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        self.manifest.addgroup(chunkiter, revmap, trp)

        # process the files
        self.ui.status(_("adding file changes\n"))
        while 1:
            f = changegroup.getchunk(source)
            if not f:
                # empty chunk marks the end of the file groups
                break
            self.ui.debug(_("adding %s revisions\n") % f)
            fl = self.file(f)
            o = fl.count()
            chunkiter = changegroup.chunkiter(source)
            if fl.addgroup(chunkiter, revmap, trp) is None:
                raise util.Abort(_("received file revlog group is empty"))
            revisions += fl.count() - o
            files += 1

        # make changelog see real files again
        cl.finalize(trp)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            # pretxnchangegroup runs inside the transaction: throwing here
            # aborts it and rolls everything back
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(cor+1)), source=srctype,
                      url=url)

        tr.close()
    finally:
        # drop the last reference so the transaction destructor runs
        del tr

    if changesets > 0:
        # post-transaction notification hooks; failures no longer roll back
        self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                  source=srctype, url=url)

        for i in xrange(cor + 1, cnr + 1):
            self.hook("incoming", node=hex(self.changelog.node(i)),
                      source=srctype, url=url)

    # never return 0 here:
    if newheads < oldheads:
        return newheads - oldheads - 1
    else:
        return newheads - oldheads + 1
1906
1906
1907
1907
def stream_in(self, remote):
    """Perform a streaming clone: copy raw store files from remote.

    Protocol: the server first sends a status line (integer response
    code), then a "total_files total_bytes" line, then for each file a
    "name\\0size" header line followed by exactly `size` bytes of data.

    Raises util.UnexpectedOutput on malformed responses and util.Abort
    on server-reported errors.  Returns len(self.heads()) + 1, mirroring
    the pull return convention of "number of heads + 1".
    """
    fp = remote.stream_out()
    l = fp.readline()
    try:
        resp = int(l)
    except ValueError:
        raise util.UnexpectedOutput(
            _('Unexpected response from remote server:'), l)
    if resp == 1:
        raise util.Abort(_('operation forbidden by server'))
    elif resp == 2:
        raise util.Abort(_('locking the remote repository failed'))
    elif resp != 0:
        raise util.Abort(_('the server sent an unknown error code'))
    self.ui.status(_('streaming all changes\n'))
    l = fp.readline()
    try:
        total_files, total_bytes = map(int, l.split(' ', 1))
    # BUG FIX: the original read "except ValueError, TypeError:", which
    # in Python 2 catches only ValueError and binds it to the name
    # TypeError -- a TypeError was never actually caught.  A tuple is
    # required to catch both exception types.
    except (ValueError, TypeError):
        raise util.UnexpectedOutput(
            _('Unexpected response from remote server:'), l)
    self.ui.status(_('%d files to transfer, %s of data\n') %
                   (total_files, util.bytecount(total_bytes)))
    start = time.time()
    for i in xrange(total_files):
        # XXX doesn't support '\n' or '\r' in filenames
        l = fp.readline()
        try:
            name, size = l.split('\0', 1)
            size = int(size)
        # BUG FIX: same tuple fix as above.
        except (ValueError, TypeError):
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
        ofp = self.sopener(name, 'w')
        for chunk in util.filechunkiter(fp, limit=size):
            ofp.write(chunk)
        ofp.close()
    elapsed = time.time() - start
    if elapsed <= 0:
        # avoid a ZeroDivisionError in the rate computation below
        elapsed = 0.001
    self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                   (util.bytecount(total_bytes), elapsed,
                    util.bytecount(total_bytes / elapsed)))
    # drop cached state so the newly written store files are picked up
    self.invalidate()
    return len(self.heads()) + 1
1953 return len(self.heads()) + 1
1954
1954
def clone(self, remote, heads=[], stream=False):
    '''clone remote repository.

    keyword arguments:
    heads: list of revs to clone (forces use of pull)
    stream: use streaming clone if possible'''

    # A streaming clone is only attempted when the caller requested it,
    # no specific heads were asked for, and the server advertises the
    # "stream" capability.
    #
    # Today, every client that can request an uncompressed clone can
    # read the repo formats supported by every server able to serve
    # them.  Should the revlog format ever change, clients will have to
    # check version and format flags on the "stream" capability and use
    # the uncompressed path only when compatible.
    streamable = stream and not heads
    if streamable and remote.capable('stream'):
        return self.stream_in(remote)
    return self.pull(remote, heads)
1973
1973
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that renames each (src, dest) pair in *files*.

    The pairs are copied into fresh tuples up front so the returned
    closure keeps no reference to the caller's structures.
    """
    pending = [tuple(entry) for entry in files]
    def a():
        for src, dest in pending:
            util.rename(src, dest)
    return a
1981
1981
def instance(ui, path, create):
    # repository factory entry point for local paths: strip a leading
    # "file:" scheme (if any) and hand the bare path to localrepository
    return localrepository(ui, util.drop_scheme('file', path), create)
1984
1984
def islocal(path):
    # this module only ever handles local repositories, so any path
    # routed here is by definition local
    return True
@@ -1,636 +1,636 b''
1 # merge.py - directory-level update/merge handling for Mercurial
1 # merge.py - directory-level update/merge handling for Mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import _
9 from i18n import _
10 import errno, util, os, tempfile, context, heapq
10 import errno, util, os, tempfile, context, heapq
11
11
12 def filemerge(repo, fw, fd, fo, wctx, mctx):
12 def filemerge(repo, fw, fd, fo, wctx, mctx):
13 """perform a 3-way merge in the working directory
13 """perform a 3-way merge in the working directory
14
14
15 fw = original filename in the working directory
15 fw = original filename in the working directory
16 fd = destination filename in the working directory
16 fd = destination filename in the working directory
17 fo = filename in other parent
17 fo = filename in other parent
18 wctx, mctx = working and merge changecontexts
18 wctx, mctx = working and merge changecontexts
19 """
19 """
20
20
21 def temp(prefix, ctx):
21 def temp(prefix, ctx):
22 pre = "%s~%s." % (os.path.basename(ctx.path()), prefix)
22 pre = "%s~%s." % (os.path.basename(ctx.path()), prefix)
23 (fd, name) = tempfile.mkstemp(prefix=pre)
23 (fd, name) = tempfile.mkstemp(prefix=pre)
24 data = repo.wwritedata(ctx.path(), ctx.data())
24 data = repo.wwritedata(ctx.path(), ctx.data())
25 f = os.fdopen(fd, "wb")
25 f = os.fdopen(fd, "wb")
26 f.write(data)
26 f.write(data)
27 f.close()
27 f.close()
28 return name
28 return name
29
29
30 fcm = wctx.filectx(fw)
30 fcm = wctx.filectx(fw)
31 fcmdata = wctx.filectx(fd).data()
31 fcmdata = wctx.filectx(fd).data()
32 fco = mctx.filectx(fo)
32 fco = mctx.filectx(fo)
33
33
34 if not fco.cmp(fcmdata): # files identical?
34 if not fco.cmp(fcmdata): # files identical?
35 return None
35 return None
36
36
37 fca = fcm.ancestor(fco)
37 fca = fcm.ancestor(fco)
38 if not fca:
38 if not fca:
39 fca = repo.filectx(fw, fileid=nullrev)
39 fca = repo.filectx(fw, fileid=nullrev)
40 a = repo.wjoin(fd)
40 a = repo.wjoin(fd)
41 b = temp("base", fca)
41 b = temp("base", fca)
42 c = temp("other", fco)
42 c = temp("other", fco)
43
43
44 if fw != fo:
44 if fw != fo:
45 repo.ui.status(_("merging %s and %s\n") % (fw, fo))
45 repo.ui.status(_("merging %s and %s\n") % (fw, fo))
46 else:
46 else:
47 repo.ui.status(_("merging %s\n") % fw)
47 repo.ui.status(_("merging %s\n") % fw)
48
48
49 repo.ui.debug(_("my %s other %s ancestor %s\n") % (fcm, fco, fca))
49 repo.ui.debug(_("my %s other %s ancestor %s\n") % (fcm, fco, fca))
50
50
51 cmd = (os.environ.get("HGMERGE") or repo.ui.config("ui", "merge")
51 cmd = (os.environ.get("HGMERGE") or repo.ui.config("ui", "merge")
52 or "hgmerge")
52 or "hgmerge")
53 r = util.system('%s "%s" "%s" "%s"' % (cmd, a, b, c), cwd=repo.root,
53 r = util.system('%s "%s" "%s" "%s"' % (cmd, a, b, c), cwd=repo.root,
54 environ={'HG_FILE': fd,
54 environ={'HG_FILE': fd,
55 'HG_MY_NODE': str(wctx.parents()[0]),
55 'HG_MY_NODE': str(wctx.parents()[0]),
56 'HG_OTHER_NODE': str(mctx)})
56 'HG_OTHER_NODE': str(mctx)})
57 if r:
57 if r:
58 repo.ui.warn(_("merging %s failed!\n") % fd)
58 repo.ui.warn(_("merging %s failed!\n") % fd)
59
59
60 os.unlink(b)
60 os.unlink(b)
61 os.unlink(c)
61 os.unlink(c)
62 return r
62 return r
63
63
64 def checkunknown(wctx, mctx):
64 def checkunknown(wctx, mctx):
65 "check for collisions between unknown files and files in mctx"
65 "check for collisions between unknown files and files in mctx"
66 man = mctx.manifest()
66 man = mctx.manifest()
67 for f in wctx.unknown():
67 for f in wctx.unknown():
68 if f in man:
68 if f in man:
69 if mctx.filectx(f).cmp(wctx.filectx(f).data()):
69 if mctx.filectx(f).cmp(wctx.filectx(f).data()):
70 raise util.Abort(_("untracked local file '%s' differs"
70 raise util.Abort(_("untracked local file '%s' differs"
71 " from remote version") % f)
71 " from remote version") % f)
72
72
73 def checkcollision(mctx):
73 def checkcollision(mctx):
74 "check for case folding collisions in the destination context"
74 "check for case folding collisions in the destination context"
75 folded = {}
75 folded = {}
76 for fn in mctx.manifest():
76 for fn in mctx.manifest():
77 fold = fn.lower()
77 fold = fn.lower()
78 if fold in folded:
78 if fold in folded:
79 raise util.Abort(_("case-folding collision between %s and %s")
79 raise util.Abort(_("case-folding collision between %s and %s")
80 % (fn, folded[fold]))
80 % (fn, folded[fold]))
81 folded[fold] = fn
81 folded[fold] = fn
82
82
83 def forgetremoved(wctx, mctx):
83 def forgetremoved(wctx, mctx):
84 """
84 """
85 Forget removed files
85 Forget removed files
86
86
87 If we're jumping between revisions (as opposed to merging), and if
87 If we're jumping between revisions (as opposed to merging), and if
88 neither the working directory nor the target rev has the file,
88 neither the working directory nor the target rev has the file,
89 then we need to remove it from the dirstate, to prevent the
89 then we need to remove it from the dirstate, to prevent the
90 dirstate from listing the file when it is no longer in the
90 dirstate from listing the file when it is no longer in the
91 manifest.
91 manifest.
92 """
92 """
93
93
94 action = []
94 action = []
95 man = mctx.manifest()
95 man = mctx.manifest()
96 for f in wctx.deleted() + wctx.removed():
96 for f in wctx.deleted() + wctx.removed():
97 if f not in man:
97 if f not in man:
98 action.append((f, "f"))
98 action.append((f, "f"))
99
99
100 return action
100 return action
101
101
102 def findcopies(repo, m1, m2, ma, limit):
102 def findcopies(repo, m1, m2, ma, limit):
103 """
103 """
104 Find moves and copies between m1 and m2 back to limit linkrev
104 Find moves and copies between m1 and m2 back to limit linkrev
105 """
105 """
106
106
107 def nonoverlap(d1, d2, d3):
107 def nonoverlap(d1, d2, d3):
108 "Return list of elements in d1 not in d2 or d3"
108 "Return list of elements in d1 not in d2 or d3"
109 l = [d for d in d1 if d not in d3 and d not in d2]
109 l = [d for d in d1 if d not in d3 and d not in d2]
110 l.sort()
110 l.sort()
111 return l
111 return l
112
112
113 def dirname(f):
113 def dirname(f):
114 s = f.rfind("/")
114 s = f.rfind("/")
115 if s == -1:
115 if s == -1:
116 return ""
116 return ""
117 return f[:s]
117 return f[:s]
118
118
119 def dirs(files):
119 def dirs(files):
120 d = {}
120 d = {}
121 for f in files:
121 for f in files:
122 f = dirname(f)
122 f = dirname(f)
123 while f not in d:
123 while f not in d:
124 d[f] = True
124 d[f] = True
125 f = dirname(f)
125 f = dirname(f)
126 return d
126 return d
127
127
128 wctx = repo.workingctx()
128 wctx = repo.workingctx()
129
129
130 def makectx(f, n):
130 def makectx(f, n):
131 if len(n) == 20:
131 if len(n) == 20:
132 return repo.filectx(f, fileid=n)
132 return repo.filectx(f, fileid=n)
133 return wctx.filectx(f)
133 return wctx.filectx(f)
134 ctx = util.cachefunc(makectx)
134 ctx = util.cachefunc(makectx)
135
135
136 def findold(fctx):
136 def findold(fctx):
137 "find files that path was copied from, back to linkrev limit"
137 "find files that path was copied from, back to linkrev limit"
138 old = {}
138 old = {}
139 seen = {}
139 seen = {}
140 orig = fctx.path()
140 orig = fctx.path()
141 visit = [fctx]
141 visit = [fctx]
142 while visit:
142 while visit:
143 fc = visit.pop()
143 fc = visit.pop()
144 s = str(fc)
144 s = str(fc)
145 if s in seen:
145 if s in seen:
146 continue
146 continue
147 seen[s] = 1
147 seen[s] = 1
148 if fc.path() != orig and fc.path() not in old:
148 if fc.path() != orig and fc.path() not in old:
149 old[fc.path()] = 1
149 old[fc.path()] = 1
150 if fc.rev() < limit:
150 if fc.rev() < limit:
151 continue
151 continue
152 visit += fc.parents()
152 visit += fc.parents()
153
153
154 old = old.keys()
154 old = old.keys()
155 old.sort()
155 old.sort()
156 return old
156 return old
157
157
158 copy = {}
158 copy = {}
159 fullcopy = {}
159 fullcopy = {}
160 diverge = {}
160 diverge = {}
161
161
162 def checkcopies(c, man, aman):
162 def checkcopies(c, man, aman):
163 '''check possible copies for filectx c'''
163 '''check possible copies for filectx c'''
164 for of in findold(c):
164 for of in findold(c):
165 fullcopy[c.path()] = of # remember for dir rename detection
165 fullcopy[c.path()] = of # remember for dir rename detection
166 if of not in man: # original file not in other manifest?
166 if of not in man: # original file not in other manifest?
167 if of in ma:
167 if of in ma:
168 diverge.setdefault(of, []).append(c.path())
168 diverge.setdefault(of, []).append(c.path())
169 continue
169 continue
170 # if the original file is unchanged on the other branch,
170 # if the original file is unchanged on the other branch,
171 # no merge needed
171 # no merge needed
172 if man[of] == aman.get(of):
172 if man[of] == aman.get(of):
173 continue
173 continue
174 c2 = ctx(of, man[of])
174 c2 = ctx(of, man[of])
175 ca = c.ancestor(c2)
175 ca = c.ancestor(c2)
176 if not ca: # unrelated?
176 if not ca: # unrelated?
177 continue
177 continue
178 # named changed on only one side?
178 # named changed on only one side?
179 if ca.path() == c.path() or ca.path() == c2.path():
179 if ca.path() == c.path() or ca.path() == c2.path():
180 if c == ca or c2 == ca: # no merge needed, ignore copy
180 if c == ca or c2 == ca: # no merge needed, ignore copy
181 continue
181 continue
182 copy[c.path()] = of
182 copy[c.path()] = of
183
183
184 if not repo.ui.configbool("merge", "followcopies", True):
184 if not repo.ui.configbool("merge", "followcopies", True):
185 return {}, {}
185 return {}, {}
186
186
187 # avoid silly behavior for update from empty dir
187 # avoid silly behavior for update from empty dir
188 if not m1 or not m2 or not ma:
188 if not m1 or not m2 or not ma:
189 return {}, {}
189 return {}, {}
190
190
191 u1 = nonoverlap(m1, m2, ma)
191 u1 = nonoverlap(m1, m2, ma)
192 u2 = nonoverlap(m2, m1, ma)
192 u2 = nonoverlap(m2, m1, ma)
193
193
194 for f in u1:
194 for f in u1:
195 checkcopies(ctx(f, m1[f]), m2, ma)
195 checkcopies(ctx(f, m1[f]), m2, ma)
196
196
197 for f in u2:
197 for f in u2:
198 checkcopies(ctx(f, m2[f]), m1, ma)
198 checkcopies(ctx(f, m2[f]), m1, ma)
199
199
200 d2 = {}
200 d2 = {}
201 for of, fl in diverge.items():
201 for of, fl in diverge.items():
202 for f in fl:
202 for f in fl:
203 fo = list(fl)
203 fo = list(fl)
204 fo.remove(f)
204 fo.remove(f)
205 d2[f] = (of, fo)
205 d2[f] = (of, fo)
206
206
207 if not fullcopy or not repo.ui.configbool("merge", "followdirs", True):
207 if not fullcopy or not repo.ui.configbool("merge", "followdirs", True):
208 return copy, diverge
208 return copy, diverge
209
209
210 # generate a directory move map
210 # generate a directory move map
211 d1, d2 = dirs(m1), dirs(m2)
211 d1, d2 = dirs(m1), dirs(m2)
212 invalid = {}
212 invalid = {}
213 dirmove = {}
213 dirmove = {}
214
214
215 # examine each file copy for a potential directory move, which is
215 # examine each file copy for a potential directory move, which is
216 # when all the files in a directory are moved to a new directory
216 # when all the files in a directory are moved to a new directory
217 for dst, src in fullcopy.items():
217 for dst, src in fullcopy.items():
218 dsrc, ddst = dirname(src), dirname(dst)
218 dsrc, ddst = dirname(src), dirname(dst)
219 if dsrc in invalid:
219 if dsrc in invalid:
220 # already seen to be uninteresting
220 # already seen to be uninteresting
221 continue
221 continue
222 elif dsrc in d1 and ddst in d1:
222 elif dsrc in d1 and ddst in d1:
223 # directory wasn't entirely moved locally
223 # directory wasn't entirely moved locally
224 invalid[dsrc] = True
224 invalid[dsrc] = True
225 elif dsrc in d2 and ddst in d2:
225 elif dsrc in d2 and ddst in d2:
226 # directory wasn't entirely moved remotely
226 # directory wasn't entirely moved remotely
227 invalid[dsrc] = True
227 invalid[dsrc] = True
228 elif dsrc in dirmove and dirmove[dsrc] != ddst:
228 elif dsrc in dirmove and dirmove[dsrc] != ddst:
229 # files from the same directory moved to two different places
229 # files from the same directory moved to two different places
230 invalid[dsrc] = True
230 invalid[dsrc] = True
231 else:
231 else:
232 # looks good so far
232 # looks good so far
233 dirmove[dsrc + "/"] = ddst + "/"
233 dirmove[dsrc + "/"] = ddst + "/"
234
234
235 for i in invalid:
235 for i in invalid:
236 if i in dirmove:
236 if i in dirmove:
237 del dirmove[i]
237 del dirmove[i]
238
238
239 del d1, d2, invalid
239 del d1, d2, invalid
240
240
241 if not dirmove:
241 if not dirmove:
242 return copy, diverge
242 return copy, diverge
243
243
244 # check unaccounted nonoverlapping files against directory moves
244 # check unaccounted nonoverlapping files against directory moves
245 for f in u1 + u2:
245 for f in u1 + u2:
246 if f not in fullcopy:
246 if f not in fullcopy:
247 for d in dirmove:
247 for d in dirmove:
248 if f.startswith(d):
248 if f.startswith(d):
249 # new file added in a directory that was moved, move it
249 # new file added in a directory that was moved, move it
250 copy[f] = dirmove[d] + f[len(d):]
250 copy[f] = dirmove[d] + f[len(d):]
251 break
251 break
252
252
253 return copy, diverge
253 return copy, diverge
254
254
255 def symmetricdifference(repo, rev1, rev2):
255 def symmetricdifference(repo, rev1, rev2):
256 """symmetric difference of the sets of ancestors of rev1 and rev2
256 """symmetric difference of the sets of ancestors of rev1 and rev2
257
257
258 I.e. revisions that are ancestors of rev1 or rev2, but not both.
258 I.e. revisions that are ancestors of rev1 or rev2, but not both.
259 """
259 """
260 # basic idea:
260 # basic idea:
261 # - mark rev1 and rev2 with different colors
261 # - mark rev1 and rev2 with different colors
262 # - walk the graph in topological order with the help of a heap;
262 # - walk the graph in topological order with the help of a heap;
263 # for each revision r:
263 # for each revision r:
264 # - if r has only one color, we want to return it
264 # - if r has only one color, we want to return it
265 # - add colors[r] to its parents
265 # - add colors[r] to its parents
266 #
266 #
267 # We keep track of the number of revisions in the heap that
267 # We keep track of the number of revisions in the heap that
268 # we may be interested in. We stop walking the graph as soon
268 # we may be interested in. We stop walking the graph as soon
269 # as this number reaches 0.
269 # as this number reaches 0.
270 WHITE = 1
270 WHITE = 1
271 BLACK = 2
271 BLACK = 2
272 ALLCOLORS = WHITE | BLACK
272 ALLCOLORS = WHITE | BLACK
273 colors = {rev1: WHITE, rev2: BLACK}
273 colors = {rev1: WHITE, rev2: BLACK}
274
274
275 cl = repo.changelog
275 cl = repo.changelog
276
276
277 visit = [-rev1, -rev2]
277 visit = [-rev1, -rev2]
278 heapq.heapify(visit)
278 heapq.heapify(visit)
279 n_wanted = len(visit)
279 n_wanted = len(visit)
280 ret = []
280 ret = []
281
281
282 while n_wanted:
282 while n_wanted:
283 r = -heapq.heappop(visit)
283 r = -heapq.heappop(visit)
284 wanted = colors[r] != ALLCOLORS
284 wanted = colors[r] != ALLCOLORS
285 n_wanted -= wanted
285 n_wanted -= wanted
286 if wanted:
286 if wanted:
287 ret.append(r)
287 ret.append(r)
288
288
289 for p in cl.parentrevs(r):
289 for p in cl.parentrevs(r):
290 if p == nullrev:
290 if p == nullrev:
291 continue
291 continue
292 if p not in colors:
292 if p not in colors:
293 # first time we see p; add it to visit
293 # first time we see p; add it to visit
294 n_wanted += wanted
294 n_wanted += wanted
295 colors[p] = colors[r]
295 colors[p] = colors[r]
296 heapq.heappush(visit, -p)
296 heapq.heappush(visit, -p)
297 elif colors[p] != ALLCOLORS and colors[p] != colors[r]:
297 elif colors[p] != ALLCOLORS and colors[p] != colors[r]:
298 # at first we thought we wanted p, but now
298 # at first we thought we wanted p, but now
299 # we know we don't really want it
299 # we know we don't really want it
300 n_wanted -= 1
300 n_wanted -= 1
301 colors[p] |= colors[r]
301 colors[p] |= colors[r]
302
302
303 del colors[r]
303 del colors[r]
304
304
305 return ret
305 return ret
306
306
307 def manifestmerge(repo, p1, p2, pa, overwrite, partial):
307 def manifestmerge(repo, p1, p2, pa, overwrite, partial):
308 """
308 """
309 Merge p1 and p2 with ancestor ma and generate merge action list
309 Merge p1 and p2 with ancestor ma and generate merge action list
310
310
311 overwrite = whether we clobber working files
311 overwrite = whether we clobber working files
312 partial = function to filter file lists
312 partial = function to filter file lists
313 """
313 """
314
314
315 repo.ui.note(_("resolving manifests\n"))
315 repo.ui.note(_("resolving manifests\n"))
316 repo.ui.debug(_(" overwrite %s partial %s\n") % (overwrite, bool(partial)))
316 repo.ui.debug(_(" overwrite %s partial %s\n") % (overwrite, bool(partial)))
317 repo.ui.debug(_(" ancestor %s local %s remote %s\n") % (pa, p1, p2))
317 repo.ui.debug(_(" ancestor %s local %s remote %s\n") % (pa, p1, p2))
318
318
319 m1 = p1.manifest()
319 m1 = p1.manifest()
320 m2 = p2.manifest()
320 m2 = p2.manifest()
321 ma = pa.manifest()
321 ma = pa.manifest()
322 backwards = (pa == p2)
322 backwards = (pa == p2)
323 action = []
323 action = []
324 copy = {}
324 copy = {}
325 diverge = {}
325 diverge = {}
326
326
327 def fmerge(f, f2=None, fa=None):
327 def fmerge(f, f2=None, fa=None):
328 """merge flags"""
328 """merge flags"""
329 if not f2:
329 if not f2:
330 f2 = f
330 f2 = f
331 fa = f
331 fa = f
332 a, b, c = ma.execf(fa), m1.execf(f), m2.execf(f2)
332 a, b, c = ma.execf(fa), m1.execf(f), m2.execf(f2)
333 if ((a^b) | (a^c)) ^ a:
333 if ((a^b) | (a^c)) ^ a:
334 return 'x'
334 return 'x'
335 a, b, c = ma.linkf(fa), m1.linkf(f), m2.linkf(f2)
335 a, b, c = ma.linkf(fa), m1.linkf(f), m2.linkf(f2)
336 if ((a^b) | (a^c)) ^ a:
336 if ((a^b) | (a^c)) ^ a:
337 return 'l'
337 return 'l'
338 return ''
338 return ''
339
339
340 def act(msg, m, f, *args):
340 def act(msg, m, f, *args):
341 repo.ui.debug(" %s: %s -> %s\n" % (f, msg, m))
341 repo.ui.debug(" %s: %s -> %s\n" % (f, msg, m))
342 action.append((f, m) + args)
342 action.append((f, m) + args)
343
343
344 if not (backwards or overwrite):
344 if not (backwards or overwrite):
345 rev1 = p1.rev()
345 rev1 = p1.rev()
346 if rev1 is None:
346 if rev1 is None:
347 # p1 is a workingctx
347 # p1 is a workingctx
348 rev1 = p1.parents()[0].rev()
348 rev1 = p1.parents()[0].rev()
349 limit = min(symmetricdifference(repo, rev1, p2.rev()))
349 limit = min(symmetricdifference(repo, rev1, p2.rev()))
350 copy, diverge = findcopies(repo, m1, m2, ma, limit)
350 copy, diverge = findcopies(repo, m1, m2, ma, limit)
351
351
352 for of, fl in diverge.items():
352 for of, fl in diverge.items():
353 act("divergent renames", "dr", of, fl)
353 act("divergent renames", "dr", of, fl)
354
354
355 copied = dict.fromkeys(copy.values())
355 copied = dict.fromkeys(copy.values())
356
356
357 # Compare manifests
357 # Compare manifests
358 for f, n in m1.iteritems():
358 for f, n in m1.iteritems():
359 if partial and not partial(f):
359 if partial and not partial(f):
360 continue
360 continue
361 if f in m2:
361 if f in m2:
362 # are files different?
362 # are files different?
363 if n != m2[f]:
363 if n != m2[f]:
364 a = ma.get(f, nullid)
364 a = ma.get(f, nullid)
365 # are both different from the ancestor?
365 # are both different from the ancestor?
366 if not overwrite and n != a and m2[f] != a:
366 if not overwrite and n != a and m2[f] != a:
367 act("versions differ", "m", f, f, f, fmerge(f), False)
367 act("versions differ", "m", f, f, f, fmerge(f), False)
368 # are we clobbering?
368 # are we clobbering?
369 # is remote's version newer?
369 # is remote's version newer?
370 # or are we going back in time and clean?
370 # or are we going back in time and clean?
371 elif overwrite or m2[f] != a or (backwards and not n[20:]):
371 elif overwrite or m2[f] != a or (backwards and not n[20:]):
372 act("remote is newer", "g", f, m2.flags(f))
372 act("remote is newer", "g", f, m2.flags(f))
373 # local is newer, not overwrite, check mode bits
373 # local is newer, not overwrite, check mode bits
374 elif fmerge(f) != m1.flags(f):
374 elif fmerge(f) != m1.flags(f):
375 act("update permissions", "e", f, m2.flags(f))
375 act("update permissions", "e", f, m2.flags(f))
376 # contents same, check mode bits
376 # contents same, check mode bits
377 elif m1.flags(f) != m2.flags(f):
377 elif m1.flags(f) != m2.flags(f):
378 if overwrite or fmerge(f) != m1.flags(f):
378 if overwrite or fmerge(f) != m1.flags(f):
379 act("update permissions", "e", f, m2.flags(f))
379 act("update permissions", "e", f, m2.flags(f))
380 elif f in copied:
380 elif f in copied:
381 continue
381 continue
382 elif f in copy:
382 elif f in copy:
383 f2 = copy[f]
383 f2 = copy[f]
384 if f2 not in m2: # directory rename
384 if f2 not in m2: # directory rename
385 act("remote renamed directory to " + f2, "d",
385 act("remote renamed directory to " + f2, "d",
386 f, None, f2, m1.flags(f))
386 f, None, f2, m1.flags(f))
387 elif f2 in m1: # case 2 A,B/B/B
387 elif f2 in m1: # case 2 A,B/B/B
388 act("local copied to " + f2, "m",
388 act("local copied to " + f2, "m",
389 f, f2, f, fmerge(f, f2, f2), False)
389 f, f2, f, fmerge(f, f2, f2), False)
390 else: # case 4,21 A/B/B
390 else: # case 4,21 A/B/B
391 act("local moved to " + f2, "m",
391 act("local moved to " + f2, "m",
392 f, f2, f, fmerge(f, f2, f2), False)
392 f, f2, f, fmerge(f, f2, f2), False)
393 elif f in ma:
393 elif f in ma:
394 if n != ma[f] and not overwrite:
394 if n != ma[f] and not overwrite:
395 if repo.ui.prompt(
395 if repo.ui.prompt(
396 (_(" local changed %s which remote deleted\n") % f) +
396 (_(" local changed %s which remote deleted\n") % f) +
397 _("(k)eep or (d)elete?"), _("[kd]"), _("k")) == _("d"):
397 _("(k)eep or (d)elete?"), _("[kd]"), _("k")) == _("d"):
398 act("prompt delete", "r", f)
398 act("prompt delete", "r", f)
399 else:
399 else:
400 act("other deleted", "r", f)
400 act("other deleted", "r", f)
401 else:
401 else:
402 # file is created on branch or in working directory
402 # file is created on branch or in working directory
403 if (overwrite and n[20:] != "u") or (backwards and not n[20:]):
403 if (overwrite and n[20:] != "u") or (backwards and not n[20:]):
404 act("remote deleted", "r", f)
404 act("remote deleted", "r", f)
405
405
406 for f, n in m2.iteritems():
406 for f, n in m2.iteritems():
407 if partial and not partial(f):
407 if partial and not partial(f):
408 continue
408 continue
409 if f in m1:
409 if f in m1:
410 continue
410 continue
411 if f in copied:
411 if f in copied:
412 continue
412 continue
413 if f in copy:
413 if f in copy:
414 f2 = copy[f]
414 f2 = copy[f]
415 if f2 not in m1: # directory rename
415 if f2 not in m1: # directory rename
416 act("local renamed directory to " + f2, "d",
416 act("local renamed directory to " + f2, "d",
417 None, f, f2, m2.flags(f))
417 None, f, f2, m2.flags(f))
418 elif f2 in m2: # rename case 1, A/A,B/A
418 elif f2 in m2: # rename case 1, A/A,B/A
419 act("remote copied to " + f, "m",
419 act("remote copied to " + f, "m",
420 f2, f, f, fmerge(f2, f, f2), False)
420 f2, f, f, fmerge(f2, f, f2), False)
421 else: # case 3,20 A/B/A
421 else: # case 3,20 A/B/A
422 act("remote moved to " + f, "m",
422 act("remote moved to " + f, "m",
423 f2, f, f, fmerge(f2, f, f2), True)
423 f2, f, f, fmerge(f2, f, f2), True)
424 elif f in ma:
424 elif f in ma:
425 if overwrite or backwards:
425 if overwrite or backwards:
426 act("recreating", "g", f, m2.flags(f))
426 act("recreating", "g", f, m2.flags(f))
427 elif n != ma[f]:
427 elif n != ma[f]:
428 if repo.ui.prompt(
428 if repo.ui.prompt(
429 (_("remote changed %s which local deleted\n") % f) +
429 (_("remote changed %s which local deleted\n") % f) +
430 _("(k)eep or (d)elete?"), _("[kd]"), _("k")) == _("k"):
430 _("(k)eep or (d)elete?"), _("[kd]"), _("k")) == _("k"):
431 act("prompt recreating", "g", f, m2.flags(f))
431 act("prompt recreating", "g", f, m2.flags(f))
432 else:
432 else:
433 act("remote created", "g", f, m2.flags(f))
433 act("remote created", "g", f, m2.flags(f))
434
434
435 return action
435 return action
436
436
437 def applyupdates(repo, action, wctx, mctx):
437 def applyupdates(repo, action, wctx, mctx):
438 "apply the merge action list to the working directory"
438 "apply the merge action list to the working directory"
439
439
440 updated, merged, removed, unresolved = 0, 0, 0, 0
440 updated, merged, removed, unresolved = 0, 0, 0, 0
441 action.sort()
441 action.sort()
442 # prescan for copy/renames
442 # prescan for copy/renames
443 for a in action:
443 for a in action:
444 f, m = a[:2]
444 f, m = a[:2]
445 if m == 'm': # merge
445 if m == 'm': # merge
446 f2, fd, flags, move = a[2:]
446 f2, fd, flags, move = a[2:]
447 if f != fd:
447 if f != fd:
448 repo.ui.debug(_("copying %s to %s\n") % (f, fd))
448 repo.ui.debug(_("copying %s to %s\n") % (f, fd))
449 repo.wwrite(fd, repo.wread(f), flags)
449 repo.wwrite(fd, repo.wread(f), flags)
450
450
451 audit_path = util.path_auditor(repo.root)
451 audit_path = util.path_auditor(repo.root)
452
452
453 for a in action:
453 for a in action:
454 f, m = a[:2]
454 f, m = a[:2]
455 if f and f[0] == "/":
455 if f and f[0] == "/":
456 continue
456 continue
457 if m == "r": # remove
457 if m == "r": # remove
458 repo.ui.note(_("removing %s\n") % f)
458 repo.ui.note(_("removing %s\n") % f)
459 audit_path(f)
459 audit_path(f)
460 try:
460 try:
461 util.unlink(repo.wjoin(f))
461 util.unlink(repo.wjoin(f))
462 except OSError, inst:
462 except OSError, inst:
463 if inst.errno != errno.ENOENT:
463 if inst.errno != errno.ENOENT:
464 repo.ui.warn(_("update failed to remove %s: %s!\n") %
464 repo.ui.warn(_("update failed to remove %s: %s!\n") %
465 (f, inst.strerror))
465 (f, inst.strerror))
466 removed += 1
466 removed += 1
467 elif m == "m": # merge
467 elif m == "m": # merge
468 f2, fd, flags, move = a[2:]
468 f2, fd, flags, move = a[2:]
469 r = filemerge(repo, f, fd, f2, wctx, mctx)
469 r = filemerge(repo, f, fd, f2, wctx, mctx)
470 if r > 0:
470 if r > 0:
471 unresolved += 1
471 unresolved += 1
472 else:
472 else:
473 if r is None:
473 if r is None:
474 updated += 1
474 updated += 1
475 else:
475 else:
476 merged += 1
476 merged += 1
477 util.set_exec(repo.wjoin(fd), "x" in flags)
477 util.set_exec(repo.wjoin(fd), "x" in flags)
478 if f != fd and move and util.lexists(repo.wjoin(f)):
478 if f != fd and move and util.lexists(repo.wjoin(f)):
479 repo.ui.debug(_("removing %s\n") % f)
479 repo.ui.debug(_("removing %s\n") % f)
480 os.unlink(repo.wjoin(f))
480 os.unlink(repo.wjoin(f))
481 elif m == "g": # get
481 elif m == "g": # get
482 flags = a[2]
482 flags = a[2]
483 repo.ui.note(_("getting %s\n") % f)
483 repo.ui.note(_("getting %s\n") % f)
484 t = mctx.filectx(f).data()
484 t = mctx.filectx(f).data()
485 repo.wwrite(f, t, flags)
485 repo.wwrite(f, t, flags)
486 updated += 1
486 updated += 1
487 elif m == "d": # directory rename
487 elif m == "d": # directory rename
488 f2, fd, flags = a[2:]
488 f2, fd, flags = a[2:]
489 if f:
489 if f:
490 repo.ui.note(_("moving %s to %s\n") % (f, fd))
490 repo.ui.note(_("moving %s to %s\n") % (f, fd))
491 t = wctx.filectx(f).data()
491 t = wctx.filectx(f).data()
492 repo.wwrite(fd, t, flags)
492 repo.wwrite(fd, t, flags)
493 util.unlink(repo.wjoin(f))
493 util.unlink(repo.wjoin(f))
494 if f2:
494 if f2:
495 repo.ui.note(_("getting %s to %s\n") % (f2, fd))
495 repo.ui.note(_("getting %s to %s\n") % (f2, fd))
496 t = mctx.filectx(f2).data()
496 t = mctx.filectx(f2).data()
497 repo.wwrite(fd, t, flags)
497 repo.wwrite(fd, t, flags)
498 updated += 1
498 updated += 1
499 elif m == "dr": # divergent renames
499 elif m == "dr": # divergent renames
500 fl = a[2]
500 fl = a[2]
501 repo.ui.warn("warning: detected divergent renames of %s to:\n" % f)
501 repo.ui.warn("warning: detected divergent renames of %s to:\n" % f)
502 for nf in fl:
502 for nf in fl:
503 repo.ui.warn(" %s\n" % nf)
503 repo.ui.warn(" %s\n" % nf)
504 elif m == "e": # exec
504 elif m == "e": # exec
505 flags = a[2]
505 flags = a[2]
506 util.set_exec(repo.wjoin(f), flags)
506 util.set_exec(repo.wjoin(f), flags)
507
507
508 return updated, merged, removed, unresolved
508 return updated, merged, removed, unresolved
509
509
510 def recordupdates(repo, action, branchmerge):
510 def recordupdates(repo, action, branchmerge):
511 "record merge actions to the dirstate"
511 "record merge actions to the dirstate"
512
512
513 for a in action:
513 for a in action:
514 f, m = a[:2]
514 f, m = a[:2]
515 if m == "r": # remove
515 if m == "r": # remove
516 if branchmerge:
516 if branchmerge:
517 repo.dirstate.remove(f)
517 repo.dirstate.remove(f)
518 else:
518 else:
519 repo.dirstate.forget(f)
519 repo.dirstate.forget(f)
520 elif m == "f": # forget
520 elif m == "f": # forget
521 repo.dirstate.forget(f)
521 repo.dirstate.forget(f)
522 elif m in "ge": # get or exec change
522 elif m in "ge": # get or exec change
523 if branchmerge:
523 if branchmerge:
524 repo.dirstate.normaldirty(f)
524 repo.dirstate.normaldirty(f)
525 else:
525 else:
526 repo.dirstate.normal(f)
526 repo.dirstate.normal(f)
527 elif m == "m": # merge
527 elif m == "m": # merge
528 f2, fd, flag, move = a[2:]
528 f2, fd, flag, move = a[2:]
529 if branchmerge:
529 if branchmerge:
530 # We've done a branch merge, mark this file as merged
530 # We've done a branch merge, mark this file as merged
531 # so that we properly record the merger later
531 # so that we properly record the merger later
532 repo.dirstate.merge(fd)
532 repo.dirstate.merge(fd)
533 if f != f2: # copy/rename
533 if f != f2: # copy/rename
534 if move:
534 if move:
535 repo.dirstate.remove(f)
535 repo.dirstate.remove(f)
536 if f != fd:
536 if f != fd:
537 repo.dirstate.copy(f, fd)
537 repo.dirstate.copy(f, fd)
538 else:
538 else:
539 repo.dirstate.copy(f2, fd)
539 repo.dirstate.copy(f2, fd)
540 else:
540 else:
541 # We've update-merged a locally modified file, so
541 # We've update-merged a locally modified file, so
542 # we set the dirstate to emulate a normal checkout
542 # we set the dirstate to emulate a normal checkout
543 # of that file some time in the past. Thus our
543 # of that file some time in the past. Thus our
544 # merge will appear as a normal local file
544 # merge will appear as a normal local file
545 # modification.
545 # modification.
546 repo.dirstate.normaldirty(fd)
546 repo.dirstate.normallookup(fd)
547 if move:
547 if move:
548 repo.dirstate.forget(f)
548 repo.dirstate.forget(f)
549 elif m == "d": # directory rename
549 elif m == "d": # directory rename
550 f2, fd, flag = a[2:]
550 f2, fd, flag = a[2:]
551 if not f2 and f not in repo.dirstate:
551 if not f2 and f not in repo.dirstate:
552 # untracked file moved
552 # untracked file moved
553 continue
553 continue
554 if branchmerge:
554 if branchmerge:
555 repo.dirstate.add(fd)
555 repo.dirstate.add(fd)
556 if f:
556 if f:
557 repo.dirstate.remove(f)
557 repo.dirstate.remove(f)
558 repo.dirstate.copy(f, fd)
558 repo.dirstate.copy(f, fd)
559 if f2:
559 if f2:
560 repo.dirstate.copy(f2, fd)
560 repo.dirstate.copy(f2, fd)
561 else:
561 else:
562 repo.dirstate.normal(fd)
562 repo.dirstate.normal(fd)
563 if f:
563 if f:
564 repo.dirstate.forget(f)
564 repo.dirstate.forget(f)
565
565
def update(repo, node, branchmerge, force, partial):
    """
    Perform a merge between the working directory and the given node

    branchmerge = whether to merge between branches
    force = whether to force branch merging or file overwriting
    partial = a function to filter file lists (dirstate not updated)
    """

    wlock = repo.wlock()
    try:
        wctx = repo.workingctx()
        if node is None:
            # no explicit target: update to the tip of the current branch
            try:
                node = repo.branchtags()[wctx.branch()]
            except KeyError:
                raise util.Abort(_("branch %s not found") % wctx.branch())
        overwrite = force and not branchmerge
        forcemerge = force and branchmerge
        parents = wctx.parents()
        p1 = parents[0]
        p2 = repo.changectx(node)
        pa = p1.ancestor(p2)
        fp1, fp2 = p1.node(), p2.node()
        xp1, xp2 = str(p1), str(p2)
        fastforward = False

        ### check phase
        if len(parents) > 1 and not overwrite:
            raise util.Abort(_("outstanding uncommitted merges"))
        if pa == p1 or pa == p2: # is there a linear path from p1 to p2?
            if branchmerge:
                if p1.branch() != p2.branch() and pa != p2:
                    # merging an ancestor on another named branch:
                    # allow it as a fast-forward
                    fastforward = True
                else:
                    raise util.Abort(_("there is nothing to merge, just use "
                                       "'hg update' or look at 'hg heads'"))
        elif not (overwrite or branchmerge):
            raise util.Abort(_("update spans branches, use 'hg merge' "
                               "or 'hg update -C' to lose changes"))
        if branchmerge and not forcemerge and wctx.files():
            raise util.Abort(_("outstanding uncommitted changes"))

        ### calculate phase
        action = []
        if not force:
            checkunknown(wctx, p2)
        if not util.checkfolding(repo.path):
            # case-insensitive filesystem: guard against name collisions
            checkcollision(p2)
        if not branchmerge:
            action += forgetremoved(wctx, p2)
        action += manifestmerge(repo, wctx, p2, pa, overwrite, partial)

        ### apply phase
        if not branchmerge:
            # plain update: the target revision becomes the sole parent
            fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
        if not partial:
            repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)

        stats = applyupdates(repo, action, wctx, p2)

        if not partial:
            recordupdates(repo, action, branchmerge)
            repo.dirstate.setparents(fp1, fp2)
            if not branchmerge and not fastforward:
                repo.dirstate.setbranch(p2.branch())
            repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])

        return stats
    finally:
        del wlock
General Comments 0
You need to be logged in to leave comments. Login now