##// END OF EJS Templates
localrepo and dirstate: rename reload to invalidate...
Matt Mackall -
r4613:3a645af7 default
parent child Browse files
Show More
@@ -1,2302 +1,2302 b''
# queue.py - patch queues for mercurial
#
# Copyright 2005, 2006 Chris Mason <mason@suse.com>
#
# This software may be used and distributed according to the terms
# of the GNU General Public License, incorporated herein by reference.

'''patch management and development

This extension lets you work with a stack of patches in a Mercurial
repository. It manages two stacks of patches - all known patches, and
applied patches (subset of known patches).

Known patches are represented as patch files in the .hg/patches
directory. Applied patches are both patch files and changesets.

Common tasks (use "hg help command" for more details):

prepare repository to work with patches qinit
create new patch qnew
import existing patch qimport

print patch series qseries
print applied patches qapplied
print name of top applied patch qtop

add known patch to applied stack qpush
remove patch from applied stack qpop
refresh contents of top applied patch qrefresh
'''

from mercurial.i18n import _
from mercurial import commands, cmdutil, hg, patch, revlog, util, changegroup
import os, sys, re, errno

# commands usable outside a repository
commands.norepo += " qclone qversion"

# Patch names looks like unix-file names.
# They must be joinable with queue directory and result in the patch path.
normname = util.normpath
41
41
class statusentry:
    """One line of the status file: an applied patch, as 'rev:name'."""

    def __init__(self, rev, name=None):
        """Store an explicit (rev, name) pair, or parse a status line.

        When name is omitted, rev is taken to be a raw status-file line
        of the form 'rev:name'; a line with no ':' separator leaves both
        fields as None (malformed entry).
        """
        if name:
            self.rev, self.name = rev, name
        else:
            fields = rev.split(':', 1)
            if len(fields) == 2:
                self.rev, self.name = fields
            else:
                self.rev, self.name = None, None

    def __str__(self):
        # serialize back to the status-file line format
        return self.rev + ':' + self.name
55
55
class queue:
    def __init__(self, ui, path, patchdir=None):
        """Load patch-queue state from disk.

        path is the repository's .hg directory; patchdir defaults to
        its 'patches' subdirectory.  The series and status files are
        read eagerly if present; guards and diff options are loaded
        lazily by active() / diffopts().
        """
        self.basepath = path
        self.path = patchdir or os.path.join(path, "patches")
        self.opener = util.opener(self.path)
        self.ui = ui
        self.applied = []        # list of statusentry for applied patches
        self.full_series = []    # raw series file lines, guards included
        self.applied_dirty = 0
        self.series_dirty = 0
        self.series_path = "series"
        self.status_path = "status"
        self.guards_path = "guards"
        self.active_guards = None    # populated on demand by active()
        self.guards_dirty = False
        self._diffopts = None        # cache filled by diffopts()

        if os.path.exists(self.join(self.series_path)):
            self.full_series = self.opener(self.series_path).read().splitlines()
            self.parse_series()

        if os.path.exists(self.join(self.status_path)):
            lines = self.opener(self.status_path).read().splitlines()
            self.applied = [statusentry(l) for l in lines]
80
80
81 def diffopts(self):
81 def diffopts(self):
82 if self._diffopts is None:
82 if self._diffopts is None:
83 self._diffopts = patch.diffopts(self.ui)
83 self._diffopts = patch.diffopts(self.ui)
84 return self._diffopts
84 return self._diffopts
85
85
86 def join(self, *p):
86 def join(self, *p):
87 return os.path.join(self.path, *p)
87 return os.path.join(self.path, *p)
88
88
89 def find_series(self, patch):
89 def find_series(self, patch):
90 pre = re.compile("(\s*)([^#]+)")
90 pre = re.compile("(\s*)([^#]+)")
91 index = 0
91 index = 0
92 for l in self.full_series:
92 for l in self.full_series:
93 m = pre.match(l)
93 m = pre.match(l)
94 if m:
94 if m:
95 s = m.group(2)
95 s = m.group(2)
96 s = s.rstrip()
96 s = s.rstrip()
97 if s == patch:
97 if s == patch:
98 return index
98 return index
99 index += 1
99 index += 1
100 return None
100 return None
101
101
102 guard_re = re.compile(r'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')
102 guard_re = re.compile(r'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')
103
103
104 def parse_series(self):
104 def parse_series(self):
105 self.series = []
105 self.series = []
106 self.series_guards = []
106 self.series_guards = []
107 for l in self.full_series:
107 for l in self.full_series:
108 h = l.find('#')
108 h = l.find('#')
109 if h == -1:
109 if h == -1:
110 patch = l
110 patch = l
111 comment = ''
111 comment = ''
112 elif h == 0:
112 elif h == 0:
113 continue
113 continue
114 else:
114 else:
115 patch = l[:h]
115 patch = l[:h]
116 comment = l[h:]
116 comment = l[h:]
117 patch = patch.strip()
117 patch = patch.strip()
118 if patch:
118 if patch:
119 if patch in self.series:
119 if patch in self.series:
120 raise util.Abort(_('%s appears more than once in %s') %
120 raise util.Abort(_('%s appears more than once in %s') %
121 (patch, self.join(self.series_path)))
121 (patch, self.join(self.series_path)))
122 self.series.append(patch)
122 self.series.append(patch)
123 self.series_guards.append(self.guard_re.findall(comment))
123 self.series_guards.append(self.guard_re.findall(comment))
124
124
125 def check_guard(self, guard):
125 def check_guard(self, guard):
126 bad_chars = '# \t\r\n\f'
126 bad_chars = '# \t\r\n\f'
127 first = guard[0]
127 first = guard[0]
128 for c in '-+':
128 for c in '-+':
129 if first == c:
129 if first == c:
130 return (_('guard %r starts with invalid character: %r') %
130 return (_('guard %r starts with invalid character: %r') %
131 (guard, c))
131 (guard, c))
132 for c in bad_chars:
132 for c in bad_chars:
133 if c in guard:
133 if c in guard:
134 return _('invalid character in guard %r: %r') % (guard, c)
134 return _('invalid character in guard %r: %r') % (guard, c)
135
135
136 def set_active(self, guards):
136 def set_active(self, guards):
137 for guard in guards:
137 for guard in guards:
138 bad = self.check_guard(guard)
138 bad = self.check_guard(guard)
139 if bad:
139 if bad:
140 raise util.Abort(bad)
140 raise util.Abort(bad)
141 guards = dict.fromkeys(guards).keys()
141 guards = dict.fromkeys(guards).keys()
142 guards.sort()
142 guards.sort()
143 self.ui.debug('active guards: %s\n' % ' '.join(guards))
143 self.ui.debug('active guards: %s\n' % ' '.join(guards))
144 self.active_guards = guards
144 self.active_guards = guards
145 self.guards_dirty = True
145 self.guards_dirty = True
146
146
147 def active(self):
147 def active(self):
148 if self.active_guards is None:
148 if self.active_guards is None:
149 self.active_guards = []
149 self.active_guards = []
150 try:
150 try:
151 guards = self.opener(self.guards_path).read().split()
151 guards = self.opener(self.guards_path).read().split()
152 except IOError, err:
152 except IOError, err:
153 if err.errno != errno.ENOENT: raise
153 if err.errno != errno.ENOENT: raise
154 guards = []
154 guards = []
155 for i, guard in enumerate(guards):
155 for i, guard in enumerate(guards):
156 bad = self.check_guard(guard)
156 bad = self.check_guard(guard)
157 if bad:
157 if bad:
158 self.ui.warn('%s:%d: %s\n' %
158 self.ui.warn('%s:%d: %s\n' %
159 (self.join(self.guards_path), i + 1, bad))
159 (self.join(self.guards_path), i + 1, bad))
160 else:
160 else:
161 self.active_guards.append(guard)
161 self.active_guards.append(guard)
162 return self.active_guards
162 return self.active_guards
163
163
164 def set_guards(self, idx, guards):
164 def set_guards(self, idx, guards):
165 for g in guards:
165 for g in guards:
166 if len(g) < 2:
166 if len(g) < 2:
167 raise util.Abort(_('guard %r too short') % g)
167 raise util.Abort(_('guard %r too short') % g)
168 if g[0] not in '-+':
168 if g[0] not in '-+':
169 raise util.Abort(_('guard %r starts with invalid char') % g)
169 raise util.Abort(_('guard %r starts with invalid char') % g)
170 bad = self.check_guard(g[1:])
170 bad = self.check_guard(g[1:])
171 if bad:
171 if bad:
172 raise util.Abort(bad)
172 raise util.Abort(bad)
173 drop = self.guard_re.sub('', self.full_series[idx])
173 drop = self.guard_re.sub('', self.full_series[idx])
174 self.full_series[idx] = drop + ''.join([' #' + g for g in guards])
174 self.full_series[idx] = drop + ''.join([' #' + g for g in guards])
175 self.parse_series()
175 self.parse_series()
176 self.series_dirty = True
176 self.series_dirty = True
177
177
178 def pushable(self, idx):
178 def pushable(self, idx):
179 if isinstance(idx, str):
179 if isinstance(idx, str):
180 idx = self.series.index(idx)
180 idx = self.series.index(idx)
181 patchguards = self.series_guards[idx]
181 patchguards = self.series_guards[idx]
182 if not patchguards:
182 if not patchguards:
183 return True, None
183 return True, None
184 default = False
184 default = False
185 guards = self.active()
185 guards = self.active()
186 exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
186 exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
187 if exactneg:
187 if exactneg:
188 return False, exactneg[0]
188 return False, exactneg[0]
189 pos = [g for g in patchguards if g[0] == '+']
189 pos = [g for g in patchguards if g[0] == '+']
190 exactpos = [g for g in pos if g[1:] in guards]
190 exactpos = [g for g in pos if g[1:] in guards]
191 if pos:
191 if pos:
192 if exactpos:
192 if exactpos:
193 return True, exactpos[0]
193 return True, exactpos[0]
194 return False, pos
194 return False, pos
195 return True, ''
195 return True, ''
196
196
197 def explain_pushable(self, idx, all_patches=False):
197 def explain_pushable(self, idx, all_patches=False):
198 write = all_patches and self.ui.write or self.ui.warn
198 write = all_patches and self.ui.write or self.ui.warn
199 if all_patches or self.ui.verbose:
199 if all_patches or self.ui.verbose:
200 if isinstance(idx, str):
200 if isinstance(idx, str):
201 idx = self.series.index(idx)
201 idx = self.series.index(idx)
202 pushable, why = self.pushable(idx)
202 pushable, why = self.pushable(idx)
203 if all_patches and pushable:
203 if all_patches and pushable:
204 if why is None:
204 if why is None:
205 write(_('allowing %s - no guards in effect\n') %
205 write(_('allowing %s - no guards in effect\n') %
206 self.series[idx])
206 self.series[idx])
207 else:
207 else:
208 if not why:
208 if not why:
209 write(_('allowing %s - no matching negative guards\n') %
209 write(_('allowing %s - no matching negative guards\n') %
210 self.series[idx])
210 self.series[idx])
211 else:
211 else:
212 write(_('allowing %s - guarded by %r\n') %
212 write(_('allowing %s - guarded by %r\n') %
213 (self.series[idx], why))
213 (self.series[idx], why))
214 if not pushable:
214 if not pushable:
215 if why:
215 if why:
216 write(_('skipping %s - guarded by %r\n') %
216 write(_('skipping %s - guarded by %r\n') %
217 (self.series[idx], why))
217 (self.series[idx], why))
218 else:
218 else:
219 write(_('skipping %s - no matching guards\n') %
219 write(_('skipping %s - no matching guards\n') %
220 self.series[idx])
220 self.series[idx])
221
221
222 def save_dirty(self):
222 def save_dirty(self):
223 def write_list(items, path):
223 def write_list(items, path):
224 fp = self.opener(path, 'w')
224 fp = self.opener(path, 'w')
225 for i in items:
225 for i in items:
226 print >> fp, i
226 print >> fp, i
227 fp.close()
227 fp.close()
228 if self.applied_dirty: write_list(map(str, self.applied), self.status_path)
228 if self.applied_dirty: write_list(map(str, self.applied), self.status_path)
229 if self.series_dirty: write_list(self.full_series, self.series_path)
229 if self.series_dirty: write_list(self.full_series, self.series_path)
230 if self.guards_dirty: write_list(self.active_guards, self.guards_path)
230 if self.guards_dirty: write_list(self.active_guards, self.guards_path)
231
231
232 def readheaders(self, patch):
232 def readheaders(self, patch):
233 def eatdiff(lines):
233 def eatdiff(lines):
234 while lines:
234 while lines:
235 l = lines[-1]
235 l = lines[-1]
236 if (l.startswith("diff -") or
236 if (l.startswith("diff -") or
237 l.startswith("Index:") or
237 l.startswith("Index:") or
238 l.startswith("===========")):
238 l.startswith("===========")):
239 del lines[-1]
239 del lines[-1]
240 else:
240 else:
241 break
241 break
242 def eatempty(lines):
242 def eatempty(lines):
243 while lines:
243 while lines:
244 l = lines[-1]
244 l = lines[-1]
245 if re.match('\s*$', l):
245 if re.match('\s*$', l):
246 del lines[-1]
246 del lines[-1]
247 else:
247 else:
248 break
248 break
249
249
250 pf = self.join(patch)
250 pf = self.join(patch)
251 message = []
251 message = []
252 comments = []
252 comments = []
253 user = None
253 user = None
254 date = None
254 date = None
255 format = None
255 format = None
256 subject = None
256 subject = None
257 diffstart = 0
257 diffstart = 0
258
258
259 for line in file(pf):
259 for line in file(pf):
260 line = line.rstrip()
260 line = line.rstrip()
261 if line.startswith('diff --git'):
261 if line.startswith('diff --git'):
262 diffstart = 2
262 diffstart = 2
263 break
263 break
264 if diffstart:
264 if diffstart:
265 if line.startswith('+++ '):
265 if line.startswith('+++ '):
266 diffstart = 2
266 diffstart = 2
267 break
267 break
268 if line.startswith("--- "):
268 if line.startswith("--- "):
269 diffstart = 1
269 diffstart = 1
270 continue
270 continue
271 elif format == "hgpatch":
271 elif format == "hgpatch":
272 # parse values when importing the result of an hg export
272 # parse values when importing the result of an hg export
273 if line.startswith("# User "):
273 if line.startswith("# User "):
274 user = line[7:]
274 user = line[7:]
275 elif line.startswith("# Date "):
275 elif line.startswith("# Date "):
276 date = line[7:]
276 date = line[7:]
277 elif not line.startswith("# ") and line:
277 elif not line.startswith("# ") and line:
278 message.append(line)
278 message.append(line)
279 format = None
279 format = None
280 elif line == '# HG changeset patch':
280 elif line == '# HG changeset patch':
281 format = "hgpatch"
281 format = "hgpatch"
282 elif (format != "tagdone" and (line.startswith("Subject: ") or
282 elif (format != "tagdone" and (line.startswith("Subject: ") or
283 line.startswith("subject: "))):
283 line.startswith("subject: "))):
284 subject = line[9:]
284 subject = line[9:]
285 format = "tag"
285 format = "tag"
286 elif (format != "tagdone" and (line.startswith("From: ") or
286 elif (format != "tagdone" and (line.startswith("From: ") or
287 line.startswith("from: "))):
287 line.startswith("from: "))):
288 user = line[6:]
288 user = line[6:]
289 format = "tag"
289 format = "tag"
290 elif format == "tag" and line == "":
290 elif format == "tag" and line == "":
291 # when looking for tags (subject: from: etc) they
291 # when looking for tags (subject: from: etc) they
292 # end once you find a blank line in the source
292 # end once you find a blank line in the source
293 format = "tagdone"
293 format = "tagdone"
294 elif message or line:
294 elif message or line:
295 message.append(line)
295 message.append(line)
296 comments.append(line)
296 comments.append(line)
297
297
298 eatdiff(message)
298 eatdiff(message)
299 eatdiff(comments)
299 eatdiff(comments)
300 eatempty(message)
300 eatempty(message)
301 eatempty(comments)
301 eatempty(comments)
302
302
303 # make sure message isn't empty
303 # make sure message isn't empty
304 if format and format.startswith("tag") and subject:
304 if format and format.startswith("tag") and subject:
305 message.insert(0, "")
305 message.insert(0, "")
306 message.insert(0, subject)
306 message.insert(0, subject)
307 return (message, comments, user, date, diffstart > 1)
307 return (message, comments, user, date, diffstart > 1)
308
308
309 def removeundo(self, repo):
309 def removeundo(self, repo):
310 undo = repo.sjoin('undo')
310 undo = repo.sjoin('undo')
311 if not os.path.exists(undo):
311 if not os.path.exists(undo):
312 return
312 return
313 try:
313 try:
314 os.unlink(undo)
314 os.unlink(undo)
315 except OSError, inst:
315 except OSError, inst:
316 self.ui.warn('error removing undo: %s\n' % str(inst))
316 self.ui.warn('error removing undo: %s\n' % str(inst))
317
317
318 def printdiff(self, repo, node1, node2=None, files=None,
318 def printdiff(self, repo, node1, node2=None, files=None,
319 fp=None, changes=None, opts={}):
319 fp=None, changes=None, opts={}):
320 fns, matchfn, anypats = cmdutil.matchpats(repo, files, opts)
320 fns, matchfn, anypats = cmdutil.matchpats(repo, files, opts)
321
321
322 patch.diff(repo, node1, node2, fns, match=matchfn,
322 patch.diff(repo, node1, node2, fns, match=matchfn,
323 fp=fp, changes=changes, opts=self.diffopts())
323 fp=fp, changes=changes, opts=self.diffopts())
324
324
325 def mergeone(self, repo, mergeq, head, patch, rev, wlock):
325 def mergeone(self, repo, mergeq, head, patch, rev, wlock):
326 # first try just applying the patch
326 # first try just applying the patch
327 (err, n) = self.apply(repo, [ patch ], update_status=False,
327 (err, n) = self.apply(repo, [ patch ], update_status=False,
328 strict=True, merge=rev, wlock=wlock)
328 strict=True, merge=rev, wlock=wlock)
329
329
330 if err == 0:
330 if err == 0:
331 return (err, n)
331 return (err, n)
332
332
333 if n is None:
333 if n is None:
334 raise util.Abort(_("apply failed for patch %s") % patch)
334 raise util.Abort(_("apply failed for patch %s") % patch)
335
335
336 self.ui.warn("patch didn't work out, merging %s\n" % patch)
336 self.ui.warn("patch didn't work out, merging %s\n" % patch)
337
337
338 # apply failed, strip away that rev and merge.
338 # apply failed, strip away that rev and merge.
339 hg.clean(repo, head, wlock=wlock)
339 hg.clean(repo, head, wlock=wlock)
340 self.strip(repo, n, update=False, backup='strip', wlock=wlock)
340 self.strip(repo, n, update=False, backup='strip', wlock=wlock)
341
341
342 ctx = repo.changectx(rev)
342 ctx = repo.changectx(rev)
343 ret = hg.merge(repo, rev, wlock=wlock)
343 ret = hg.merge(repo, rev, wlock=wlock)
344 if ret:
344 if ret:
345 raise util.Abort(_("update returned %d") % ret)
345 raise util.Abort(_("update returned %d") % ret)
346 n = repo.commit(None, ctx.description(), ctx.user(),
346 n = repo.commit(None, ctx.description(), ctx.user(),
347 force=1, wlock=wlock)
347 force=1, wlock=wlock)
348 if n == None:
348 if n == None:
349 raise util.Abort(_("repo commit failed"))
349 raise util.Abort(_("repo commit failed"))
350 try:
350 try:
351 message, comments, user, date, patchfound = mergeq.readheaders(patch)
351 message, comments, user, date, patchfound = mergeq.readheaders(patch)
352 except:
352 except:
353 raise util.Abort(_("unable to read %s") % patch)
353 raise util.Abort(_("unable to read %s") % patch)
354
354
355 patchf = self.opener(patch, "w")
355 patchf = self.opener(patch, "w")
356 if comments:
356 if comments:
357 comments = "\n".join(comments) + '\n\n'
357 comments = "\n".join(comments) + '\n\n'
358 patchf.write(comments)
358 patchf.write(comments)
359 self.printdiff(repo, head, n, fp=patchf)
359 self.printdiff(repo, head, n, fp=patchf)
360 patchf.close()
360 patchf.close()
361 self.removeundo(repo)
361 self.removeundo(repo)
362 return (0, n)
362 return (0, n)
363
363
364 def qparents(self, repo, rev=None):
364 def qparents(self, repo, rev=None):
365 if rev is None:
365 if rev is None:
366 (p1, p2) = repo.dirstate.parents()
366 (p1, p2) = repo.dirstate.parents()
367 if p2 == revlog.nullid:
367 if p2 == revlog.nullid:
368 return p1
368 return p1
369 if len(self.applied) == 0:
369 if len(self.applied) == 0:
370 return None
370 return None
371 return revlog.bin(self.applied[-1].rev)
371 return revlog.bin(self.applied[-1].rev)
372 pp = repo.changelog.parents(rev)
372 pp = repo.changelog.parents(rev)
373 if pp[1] != revlog.nullid:
373 if pp[1] != revlog.nullid:
374 arevs = [ x.rev for x in self.applied ]
374 arevs = [ x.rev for x in self.applied ]
375 p0 = revlog.hex(pp[0])
375 p0 = revlog.hex(pp[0])
376 p1 = revlog.hex(pp[1])
376 p1 = revlog.hex(pp[1])
377 if p0 in arevs:
377 if p0 in arevs:
378 return pp[0]
378 return pp[0]
379 if p1 in arevs:
379 if p1 in arevs:
380 return pp[1]
380 return pp[1]
381 return pp[0]
381 return pp[0]
382
382
383 def mergepatch(self, repo, mergeq, series, wlock):
383 def mergepatch(self, repo, mergeq, series, wlock):
384 if len(self.applied) == 0:
384 if len(self.applied) == 0:
385 # each of the patches merged in will have two parents. This
385 # each of the patches merged in will have two parents. This
386 # can confuse the qrefresh, qdiff, and strip code because it
386 # can confuse the qrefresh, qdiff, and strip code because it
387 # needs to know which parent is actually in the patch queue.
387 # needs to know which parent is actually in the patch queue.
388 # so, we insert a merge marker with only one parent. This way
388 # so, we insert a merge marker with only one parent. This way
389 # the first patch in the queue is never a merge patch
389 # the first patch in the queue is never a merge patch
390 #
390 #
391 pname = ".hg.patches.merge.marker"
391 pname = ".hg.patches.merge.marker"
392 n = repo.commit(None, '[mq]: merge marker', user=None, force=1,
392 n = repo.commit(None, '[mq]: merge marker', user=None, force=1,
393 wlock=wlock)
393 wlock=wlock)
394 self.removeundo(repo)
394 self.removeundo(repo)
395 self.applied.append(statusentry(revlog.hex(n), pname))
395 self.applied.append(statusentry(revlog.hex(n), pname))
396 self.applied_dirty = 1
396 self.applied_dirty = 1
397
397
398 head = self.qparents(repo)
398 head = self.qparents(repo)
399
399
400 for patch in series:
400 for patch in series:
401 patch = mergeq.lookup(patch, strict=True)
401 patch = mergeq.lookup(patch, strict=True)
402 if not patch:
402 if not patch:
403 self.ui.warn("patch %s does not exist\n" % patch)
403 self.ui.warn("patch %s does not exist\n" % patch)
404 return (1, None)
404 return (1, None)
405 pushable, reason = self.pushable(patch)
405 pushable, reason = self.pushable(patch)
406 if not pushable:
406 if not pushable:
407 self.explain_pushable(patch, all_patches=True)
407 self.explain_pushable(patch, all_patches=True)
408 continue
408 continue
409 info = mergeq.isapplied(patch)
409 info = mergeq.isapplied(patch)
410 if not info:
410 if not info:
411 self.ui.warn("patch %s is not applied\n" % patch)
411 self.ui.warn("patch %s is not applied\n" % patch)
412 return (1, None)
412 return (1, None)
413 rev = revlog.bin(info[1])
413 rev = revlog.bin(info[1])
414 (err, head) = self.mergeone(repo, mergeq, head, patch, rev, wlock)
414 (err, head) = self.mergeone(repo, mergeq, head, patch, rev, wlock)
415 if head:
415 if head:
416 self.applied.append(statusentry(revlog.hex(head), patch))
416 self.applied.append(statusentry(revlog.hex(head), patch))
417 self.applied_dirty = 1
417 self.applied_dirty = 1
418 if err:
418 if err:
419 return (err, head)
419 return (err, head)
420 self.save_dirty()
420 self.save_dirty()
421 return (0, head)
421 return (0, head)
422
422
423 def patch(self, repo, patchfile):
423 def patch(self, repo, patchfile):
424 '''Apply patchfile to the working directory.
424 '''Apply patchfile to the working directory.
425 patchfile: file name of patch'''
425 patchfile: file name of patch'''
426 files = {}
426 files = {}
427 try:
427 try:
428 fuzz = patch.patch(patchfile, self.ui, strip=1, cwd=repo.root,
428 fuzz = patch.patch(patchfile, self.ui, strip=1, cwd=repo.root,
429 files=files)
429 files=files)
430 except Exception, inst:
430 except Exception, inst:
431 self.ui.note(str(inst) + '\n')
431 self.ui.note(str(inst) + '\n')
432 if not self.ui.verbose:
432 if not self.ui.verbose:
433 self.ui.warn("patch failed, unable to continue (try -v)\n")
433 self.ui.warn("patch failed, unable to continue (try -v)\n")
434 return (False, files, False)
434 return (False, files, False)
435
435
436 return (True, files, fuzz)
436 return (True, files, fuzz)
437
437
438 def apply(self, repo, series, list=False, update_status=True,
438 def apply(self, repo, series, list=False, update_status=True,
439 strict=False, patchdir=None, merge=None, wlock=None,
439 strict=False, patchdir=None, merge=None, wlock=None,
440 all_files={}):
440 all_files={}):
441 if not wlock:
441 if not wlock:
442 wlock = repo.wlock()
442 wlock = repo.wlock()
443 lock = repo.lock()
443 lock = repo.lock()
444 tr = repo.transaction()
444 tr = repo.transaction()
445 try:
445 try:
446 ret = self._apply(tr, repo, series, list, update_status,
446 ret = self._apply(tr, repo, series, list, update_status,
447 strict, patchdir, merge, wlock,
447 strict, patchdir, merge, wlock,
448 lock=lock, all_files=all_files)
448 lock=lock, all_files=all_files)
449 tr.close()
449 tr.close()
450 self.save_dirty()
450 self.save_dirty()
451 return ret
451 return ret
452 except:
452 except:
453 try:
453 try:
454 tr.abort()
454 tr.abort()
455 finally:
455 finally:
456 repo.reload()
456 repo.invalidate()
457 repo.wreload()
457 repo.dirstate.invalidate()
458 raise
458 raise
459
459
460 def _apply(self, tr, repo, series, list=False, update_status=True,
460 def _apply(self, tr, repo, series, list=False, update_status=True,
461 strict=False, patchdir=None, merge=None, wlock=None,
461 strict=False, patchdir=None, merge=None, wlock=None,
462 lock=None, all_files={}):
462 lock=None, all_files={}):
463 # TODO unify with commands.py
463 # TODO unify with commands.py
464 if not patchdir:
464 if not patchdir:
465 patchdir = self.path
465 patchdir = self.path
466 err = 0
466 err = 0
467 n = None
467 n = None
468 for patchname in series:
468 for patchname in series:
469 pushable, reason = self.pushable(patchname)
469 pushable, reason = self.pushable(patchname)
470 if not pushable:
470 if not pushable:
471 self.explain_pushable(patchname, all_patches=True)
471 self.explain_pushable(patchname, all_patches=True)
472 continue
472 continue
473 self.ui.warn("applying %s\n" % patchname)
473 self.ui.warn("applying %s\n" % patchname)
474 pf = os.path.join(patchdir, patchname)
474 pf = os.path.join(patchdir, patchname)
475
475
476 try:
476 try:
477 message, comments, user, date, patchfound = self.readheaders(patchname)
477 message, comments, user, date, patchfound = self.readheaders(patchname)
478 except:
478 except:
479 self.ui.warn("Unable to read %s\n" % patchname)
479 self.ui.warn("Unable to read %s\n" % patchname)
480 err = 1
480 err = 1
481 break
481 break
482
482
483 if not message:
483 if not message:
484 message = "imported patch %s\n" % patchname
484 message = "imported patch %s\n" % patchname
485 else:
485 else:
486 if list:
486 if list:
487 message.append("\nimported patch %s" % patchname)
487 message.append("\nimported patch %s" % patchname)
488 message = '\n'.join(message)
488 message = '\n'.join(message)
489
489
490 (patcherr, files, fuzz) = self.patch(repo, pf)
490 (patcherr, files, fuzz) = self.patch(repo, pf)
491 all_files.update(files)
491 all_files.update(files)
492 patcherr = not patcherr
492 patcherr = not patcherr
493
493
494 if merge and files:
494 if merge and files:
495 # Mark as removed/merged and update dirstate parent info
495 # Mark as removed/merged and update dirstate parent info
496 removed = []
496 removed = []
497 merged = []
497 merged = []
498 for f in files:
498 for f in files:
499 if os.path.exists(repo.dirstate.wjoin(f)):
499 if os.path.exists(repo.dirstate.wjoin(f)):
500 merged.append(f)
500 merged.append(f)
501 else:
501 else:
502 removed.append(f)
502 removed.append(f)
503 repo.dirstate.update(repo.dirstate.filterfiles(removed), 'r')
503 repo.dirstate.update(repo.dirstate.filterfiles(removed), 'r')
504 repo.dirstate.update(repo.dirstate.filterfiles(merged), 'm')
504 repo.dirstate.update(repo.dirstate.filterfiles(merged), 'm')
505 p1, p2 = repo.dirstate.parents()
505 p1, p2 = repo.dirstate.parents()
506 repo.dirstate.setparents(p1, merge)
506 repo.dirstate.setparents(p1, merge)
507 files = patch.updatedir(self.ui, repo, files, wlock=wlock)
507 files = patch.updatedir(self.ui, repo, files, wlock=wlock)
508 n = repo.commit(files, message, user, date, force=1, lock=lock,
508 n = repo.commit(files, message, user, date, force=1, lock=lock,
509 wlock=wlock)
509 wlock=wlock)
510
510
511 if n == None:
511 if n == None:
512 raise util.Abort(_("repo commit failed"))
512 raise util.Abort(_("repo commit failed"))
513
513
514 if update_status:
514 if update_status:
515 self.applied.append(statusentry(revlog.hex(n), patchname))
515 self.applied.append(statusentry(revlog.hex(n), patchname))
516
516
517 if patcherr:
517 if patcherr:
518 if not patchfound:
518 if not patchfound:
519 self.ui.warn("patch %s is empty\n" % patchname)
519 self.ui.warn("patch %s is empty\n" % patchname)
520 err = 0
520 err = 0
521 else:
521 else:
522 self.ui.warn("patch failed, rejects left in working dir\n")
522 self.ui.warn("patch failed, rejects left in working dir\n")
523 err = 1
523 err = 1
524 break
524 break
525
525
526 if fuzz and strict:
526 if fuzz and strict:
527 self.ui.warn("fuzz found when applying patch, stopping\n")
527 self.ui.warn("fuzz found when applying patch, stopping\n")
528 err = 1
528 err = 1
529 break
529 break
530 self.removeundo(repo)
530 self.removeundo(repo)
531 return (err, n)
531 return (err, n)
532
532
def delete(self, repo, patches, opts):
    """Remove patches from the series file.

    Each name in *patches* is resolved strictly, must be unapplied and
    present in the series.  With opts['rev'], consecutively applied
    revisions starting at qbase may also be deleted.  Unless
    opts['keep'] is set, the patch files themselves are removed
    (through the queue repo when one exists).
    """
    realpatches = []
    for patch in patches:
        patch = self.lookup(patch, strict=True)
        info = self.isapplied(patch)
        if info:
            raise util.Abort(_("cannot delete applied patch %s") % patch)
        if patch not in self.series:
            raise util.Abort(_("patch %s not in series file") % patch)
        realpatches.append(patch)

    appliedbase = 0
    if opts.get('rev'):
        if not self.applied:
            raise util.Abort(_('no patches applied'))
        revs = cmdutil.revrange(repo, opts['rev'])
        if len(revs) > 1 and revs[0] > revs[1]:
            revs.reverse()
        for rev in revs:
            if appliedbase >= len(self.applied):
                raise util.Abort(_("revision %d is not managed") % rev)
            # revisions must match the applied stack from the bottom up
            base = revlog.bin(self.applied[appliedbase].rev)
            node = repo.changelog.node(rev)
            if node != base:
                raise util.Abort(_("cannot delete revision %d above "
                                   "applied patches") % rev)
            realpatches.append(self.applied[appliedbase].name)
            appliedbase += 1

    if not opts.get('keep'):
        r = self.qrepo()
        if r:
            r.remove(realpatches, True)
        else:
            for p in realpatches:
                os.unlink(self.join(p))

    if appliedbase:
        del self.applied[:appliedbase]
        self.applied_dirty = 1
    # delete from the end so earlier indices stay valid
    for i in sorted((self.find_series(p) for p in realpatches),
                    reverse=True):
        del self.full_series[i]
    self.parse_series()
    self.series_dirty = 1
580
580
def check_toppatch(self, repo):
    """Return the node of the topmost applied patch, or None if the
    queue is empty.  Aborts when the working directory parents do not
    include that node (i.e. the user updated away from the queue top).
    """
    if not self.applied:
        return None
    top = revlog.bin(self.applied[-1].rev)
    if top not in repo.dirstate.parents():
        raise util.Abort(_("queue top not at same revision as working directory"))
    return top
def check_localchanges(self, repo, force=False, refresh=True):
    """Abort if the working directory has uncommitted changes.

    Returns the (modified, added, removed, deleted) file lists when the
    directory is clean or *force* is set.  The *refresh* flag only
    selects which abort message is shown.
    """
    modified, added, removed, deleted = repo.status()[:4]
    if (modified or added or removed or deleted) and not force:
        if refresh:
            raise util.Abort(_("local changes found, refresh first"))
        raise util.Abort(_("local changes found"))
    return modified, added, removed, deleted
def new(self, repo, patch, msg=None, force=None):
    """Create a new patch *patch* on top of the applied stack.

    Commits any local changes (unless that aborts via
    check_localchanges), appends the patch to the series and applied
    lists, writes the patch file (containing *msg* if given), and adds
    it to the queue repository when one exists.
    """
    if os.path.exists(self.join(patch)):
        raise util.Abort(_('patch "%s" already exists') % patch)
    m, a, r, d = self.check_localchanges(repo, force)
    commitfiles = m + a + r
    self.check_toppatch(repo)
    wlock = repo.wlock()
    insert = self.full_series_end()
    if msg:
        commitmsg = "[mq]: %s" % msg
    else:
        commitmsg = "New patch: %s" % patch
    n = repo.commit(commitfiles, commitmsg, force=True, wlock=wlock)
    if n is None:
        raise util.Abort(_("repo commit failed"))
    self.full_series[insert:insert] = [patch]
    self.applied.append(statusentry(revlog.hex(n), patch))
    self.parse_series()
    self.series_dirty = 1
    self.applied_dirty = 1
    p = self.opener(patch, "w")
    if msg:
        p.write(msg + "\n")
    p.close()
    # drop our reference to the working-dir lock before further work
    # (presumably released via refcount — TODO confirm)
    wlock = None
    r = self.qrepo()
    if r:
        r.add([patch])
    if commitfiles:
        self.refresh(repo, short=True)
    self.removeundo(repo)
630
630
def strip(self, repo, rev, update=True, backup="all", wlock=None):
    """Strip changeset *rev* and its descendants from the repository.

    Unrelated branches above *rev* are saved to a bundle and re-added
    afterwards.  With backup == "all" the whole stripped range is also
    bundled to .hg/strip-backup.  With *update*, the working directory
    is first cleaned to the parent of *rev*.
    """
    def limitheads(chlog, stop):
        """return the list of all nodes that have no children"""
        parents = {}
        heads = []
        stoprev = 0
        if stop in chlog.nodemap:
            stoprev = chlog.rev(stop)

        # walk from tip downwards; a node never seen as a parent is a head
        for r in range(chlog.count() - 1, -1, -1):
            n = chlog.node(r)
            if n not in parents:
                heads.append(n)
            if n == stop:
                break
            if r < stoprev:
                break
            for pn in chlog.parents(n):
                parents[pn] = 1
        return heads

    def bundle(cg):
        # write changegroup cg to a backup bundle; returns its path
        backupdir = repo.join("strip-backup")
        if not os.path.isdir(backupdir):
            os.mkdir(backupdir)
        name = os.path.join(backupdir, "%s" % revlog.short(rev))
        name = savename(name)
        self.ui.warn("saving bundle to %s\n" % name)
        return changegroup.writebundle(cg, name, "HG10BZ")

    def stripall(revnum):
        mm = repo.changectx(rev).manifest()
        seen = {}

        # collect, per touched file, the last filenode still wanted
        for x in range(revnum, repo.changelog.count()):
            for f in repo.changectx(x).files():
                if f in seen:
                    continue
                seen[f] = 1
                if f in mm:
                    filerev = mm[f]
                else:
                    filerev = 0
                seen[f] = filerev
        # we go in two steps here so the strip loop happens in a
        # sensible order. When stripping many files, this helps keep
        # our disk access patterns under control.
        for f in sorted(seen):
            ff = repo.file(f)
            filerev = seen[f]
            if filerev != 0:
                if filerev in ff.nodemap:
                    filerev = ff.rev(filerev)
                else:
                    filerev = 0
            ff.strip(filerev, revnum)

    if not wlock:
        wlock = repo.wlock()
    lock = repo.lock()  # held by reference for the duration of the strip
    chlog = repo.changelog
    # TODO delete the undo files, and handle undo of merge sets
    pp = chlog.parents(rev)
    revnum = chlog.rev(rev)

    if update:
        self.check_localchanges(repo, refresh=False)
        urev = self.qparents(repo, rev)
        hg.clean(repo, urev, wlock=wlock)
        repo.dirstate.write()

    # save is a list of all the branches we are truncating away
    # that we actually want to keep. changegroup will be used
    # to preserve them and add them back after the truncate
    saveheads = []
    savebases = {}

    heads = limitheads(chlog, rev)
    seen = {}

    # search through all the heads, finding those where the revision
    # we want to strip away is an ancestor. Also look for merges
    # that might be turned into new heads by the strip.
    while heads:
        h = heads.pop()
        n = h
        while True:
            seen[n] = 1
            pp = chlog.parents(n)
            if pp[1] != revlog.nullid:
                for p in pp:
                    if chlog.rev(p) > revnum and p not in seen:
                        heads.append(p)
            if pp[0] == revlog.nullid:
                break
            if chlog.rev(pp[0]) < revnum:
                break
            n = pp[0]
            if n == rev:
                break
        r = chlog.reachable(h, rev)
        if rev not in r:
            saveheads.append(h)
            for x in r:
                if chlog.rev(x) > revnum:
                    savebases[x] = 1

    # create a changegroup for all the branches we need to keep
    if backup == "all":
        backupch = repo.changegroupsubset([rev], chlog.heads(), 'strip')
        bundle(backupch)
    if saveheads:
        backupch = repo.changegroupsubset(list(savebases), saveheads,
                                          'strip')
        chgrpfile = bundle(backupch)

    stripall(revnum)

    change = chlog.read(rev)
    chlog.strip(revnum, revnum)
    repo.manifest.strip(repo.manifest.rev(change[0]), revnum)
    self.removeundo(repo)
    if saveheads:
        self.ui.status("adding branch\n")
        commands.unbundle(self.ui, repo, "file:%s" % chgrpfile,
                          update=False)
        if backup != "strip":
            os.unlink(chgrpfile)
def isapplied(self, patch):
    """Return (index, rev, name) for an applied patch, else None."""
    for idx, entry in enumerate(self.applied):
        if entry.name == patch:
            return (idx, entry.rev, entry.name)
    return None
768
768
# if the exact patch name does not exist, we try a few
# variations. If strict is passed, we try only #1
#
# 1) a number to indicate an offset in the series file
# 2) a unique substring of the patch name was given
# 3) patchname[-+]num to indicate an offset in the series file
def lookup(self, patch, strict=False):
    """Resolve *patch* to a name from the series file (see rules above).

    Returns None for a None argument; aborts when nothing matches.
    """
    patch = patch and str(patch)

    def partial_name(s):
        # exact name, unique substring, or the qtip/qbase aliases
        if s in self.series:
            return s
        matches = [x for x in self.series if s in x]
        if len(matches) > 1:
            self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
            for m in matches:
                self.ui.warn(' %s\n' % m)
            return None
        if matches:
            return matches[0]
        if len(self.series) > 0 and len(self.applied) > 0:
            if s == 'qtip':
                return self.series[self.series_end(True) - 1]
            if s == 'qbase':
                return self.series[0]
        return None

    if patch is None:
        return None

    # we don't want to return a partial match until we make
    # sure the file name passed in does not exist (checked below)
    res = partial_name(patch)
    if res and res == patch:
        return res

    if not os.path.isfile(self.join(patch)):
        try:
            sno = int(patch)
        except (ValueError, OverflowError):
            pass
        else:
            if sno < len(self.series):
                return self.series[sno]
        if not strict:
            # return any partial match made above
            if res:
                return res
            # patchname-N / patchname+N: offsets within the series
            minus = patch.rfind('-')
            if minus >= 0:
                res = partial_name(patch[:minus])
                if res:
                    i = self.series.index(res)
                    try:
                        off = int(patch[minus + 1:] or 1)
                    except (ValueError, OverflowError):
                        pass
                    else:
                        if i - off >= 0:
                            return self.series[i - off]
            plus = patch.rfind('+')
            if plus >= 0:
                res = partial_name(patch[:plus])
                if res:
                    i = self.series.index(res)
                    try:
                        off = int(patch[plus + 1:] or 1)
                    except (ValueError, OverflowError):
                        pass
                    else:
                        if i + off < len(self.series):
                            return self.series[i + off]
    raise util.Abort(_("patch %s not in series") % patch)
841
841
def push(self, repo, patch=None, force=False, list=False,
         mergeq=None, wlock=None):
    """Apply patches up to (and including) *patch*, or the next one.

    Returns the error status from apply/mergepatch, or 1 when the
    series is already fully applied.  On an unexpected exception the
    working directory is cleaned up before re-raising.
    """
    if not wlock:
        wlock = repo.wlock()
    patch = self.lookup(patch)
    # Suppose our series file is: A B C and the current 'top' patch is B.
    # qpush C should be performed (moving forward)
    # qpush B is a NOP (no change)
    # qpush A is an error (can't go backwards with qpush)
    if patch:
        info = self.isapplied(patch)
        if info:
            if info[0] < len(self.applied) - 1:
                raise util.Abort(_("cannot push to a previous patch: %s") %
                                 patch)
            if info[0] < len(self.series) - 1:
                self.ui.warn(_('qpush: %s is already at the top\n') % patch)
            else:
                self.ui.warn(_('all patches are currently applied\n'))
            return

    # Following the above example, starting at 'top' of B:
    # qpush should be performed (pushes C), but a subsequent qpush without
    # an argument is an error (nothing to apply). This allows a loop
    # of "...while hg qpush..." to work as it detects an error when done
    if self.series_end() == len(self.series):
        self.ui.warn(_('patch series already fully applied\n'))
        return 1
    if not force:
        self.check_localchanges(repo)

    self.applied_dirty = 1
    start = self.series_end()
    if start > 0:
        self.check_toppatch(repo)
    if not patch:
        patch = self.series[start]
        end = start + 1
    else:
        end = self.series.index(patch, start) + 1
    s = self.series[start:end]
    all_files = {}
    try:
        if mergeq:
            ret = self.mergepatch(repo, mergeq, s, wlock)
        else:
            ret = self.apply(repo, s, list, wlock=wlock,
                             all_files=all_files)
    except:  # deliberately broad: clean up, then re-raise
        self.ui.warn(_('cleaning up working directory...'))
        node = repo.dirstate.parents()[0]
        hg.revert(repo, node, None, wlock)
        unknown = repo.status(wlock=wlock)[4]
        # only remove unknown files that we know we touched or
        # created while patching
        for f in unknown:
            if f in all_files:
                util.unlink(repo.wjoin(f))
        self.ui.warn(_('done\n'))
        raise
    top = self.applied[-1].name
    if ret[0]:
        self.ui.write("Errors during apply, please fix and refresh %s\n" %
                      top)
    else:
        self.ui.write("Now at: %s\n" % top)
    return ret[0]
909
909
def pop(self, repo, patch=None, force=False, update=True, all=False,
        wlock=None):
    """Unapply patches down to (and including) *patch*, or the top one.

    With *update*, restores working-directory files to the new queue
    parent using a simplified update (no local changes may exist).
    The applied list is trimmed and the stripped entries removed.
    """
    def getfile(f, rev):
        # overwrite working copy of f with its contents at rev
        t = repo.file(f).read(rev)
        repo.wfile(f, "w").write(t)

    if not wlock:
        wlock = repo.wlock()
    if patch:
        # index, rev, patch
        info = self.isapplied(patch)
        if not info:
            patch = self.lookup(patch)
            info = self.isapplied(patch)
            if not info:
                raise util.Abort(_("patch %s is not applied") % patch)

    if len(self.applied) == 0:
        # Allow qpop -a to work repeatedly,
        # but not qpop without an argument
        self.ui.warn(_("no patches applied\n"))
        return not all

    if not update:
        parents = repo.dirstate.parents()
        rr = [revlog.bin(x.rev) for x in self.applied]
        for p in parents:
            if p in rr:
                self.ui.warn("qpop: forcing dirstate update\n")
                update = True

    if not force and update:
        self.check_localchanges(repo)

    self.applied_dirty = 1
    end = len(self.applied)
    if not patch:
        if all:
            popi = 0
        else:
            popi = len(self.applied) - 1
    else:
        popi = info[0] + 1
        if popi >= end:
            self.ui.warn("qpop: %s is already at the top\n" % patch)
            return
    info = [popi] + [self.applied[popi].rev, self.applied[popi].name]

    start = info[0]
    rev = revlog.bin(info[1])

    # we know there are no local changes, so we can make a simplified
    # form of hg.update.
    if update:
        top = self.check_toppatch(repo)
        qp = self.qparents(repo, rev)
        changes = repo.changelog.read(qp)
        mmap = repo.manifest.read(changes[0])
        m, a, r, d, u = repo.status(qp, top)[:5]
        if d:
            raise util.Abort("deletions found between repo revs")
        for f in m:
            getfile(f, mmap[f])
        for f in r:
            getfile(f, mmap[f])
            util.set_exec(repo.wjoin(f), mmap.execf(f))
        repo.dirstate.update(m + r, 'n')
        for f in a:
            try:
                os.unlink(repo.wjoin(f))
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise
            # best-effort: prune now-empty parent directories
            try:
                os.removedirs(os.path.dirname(repo.wjoin(f)))
            except:
                pass
        if a:
            repo.dirstate.forget(a)
        repo.dirstate.setparents(qp, revlog.nullid)
    self.strip(repo, rev, update=False, backup='strip', wlock=wlock)
    del self.applied[start:end]
    if len(self.applied):
        self.ui.write("Now at: %s\n" % self.applied[-1].name)
    else:
        self.ui.write("Patch queue now empty\n")
994
994
def diff(self, repo, pats, opts):
    """Print the diff of the topmost applied patch against its parent."""
    top = self.check_toppatch(repo)
    if not top:
        self.ui.write("No patches applied\n")
        return
    qp = self.qparents(repo, top)
    if opts.get('git'):
        self.diffopts().git = True
    self.printdiff(repo, qp, files=pats, opts=opts)
1004
1004
1005 def refresh(self, repo, pats=None, **opts):
1005 def refresh(self, repo, pats=None, **opts):
1006 if len(self.applied) == 0:
1006 if len(self.applied) == 0:
1007 self.ui.write("No patches applied\n")
1007 self.ui.write("No patches applied\n")
1008 return 1
1008 return 1
1009 wlock = repo.wlock()
1009 wlock = repo.wlock()
1010 self.check_toppatch(repo)
1010 self.check_toppatch(repo)
1011 (top, patchfn) = (self.applied[-1].rev, self.applied[-1].name)
1011 (top, patchfn) = (self.applied[-1].rev, self.applied[-1].name)
1012 top = revlog.bin(top)
1012 top = revlog.bin(top)
1013 cparents = repo.changelog.parents(top)
1013 cparents = repo.changelog.parents(top)
1014 patchparent = self.qparents(repo, top)
1014 patchparent = self.qparents(repo, top)
1015 message, comments, user, date, patchfound = self.readheaders(patchfn)
1015 message, comments, user, date, patchfound = self.readheaders(patchfn)
1016
1016
1017 patchf = self.opener(patchfn, "w")
1017 patchf = self.opener(patchfn, "w")
1018 msg = opts.get('msg', '').rstrip()
1018 msg = opts.get('msg', '').rstrip()
1019 if msg:
1019 if msg:
1020 if comments:
1020 if comments:
1021 # Remove existing message.
1021 # Remove existing message.
1022 ci = 0
1022 ci = 0
1023 subj = None
1023 subj = None
1024 for mi in xrange(len(message)):
1024 for mi in xrange(len(message)):
1025 if comments[ci].lower().startswith('subject: '):
1025 if comments[ci].lower().startswith('subject: '):
1026 subj = comments[ci][9:]
1026 subj = comments[ci][9:]
1027 while message[mi] != comments[ci] and message[mi] != subj:
1027 while message[mi] != comments[ci] and message[mi] != subj:
1028 ci += 1
1028 ci += 1
1029 del comments[ci]
1029 del comments[ci]
1030 comments.append(msg)
1030 comments.append(msg)
1031 if comments:
1031 if comments:
1032 comments = "\n".join(comments) + '\n\n'
1032 comments = "\n".join(comments) + '\n\n'
1033 patchf.write(comments)
1033 patchf.write(comments)
1034
1034
1035 if opts.get('git'):
1035 if opts.get('git'):
1036 self.diffopts().git = True
1036 self.diffopts().git = True
1037 fns, matchfn, anypats = cmdutil.matchpats(repo, pats, opts)
1037 fns, matchfn, anypats = cmdutil.matchpats(repo, pats, opts)
1038 tip = repo.changelog.tip()
1038 tip = repo.changelog.tip()
1039 if top == tip:
1039 if top == tip:
1040 # if the top of our patch queue is also the tip, there is an
1040 # if the top of our patch queue is also the tip, there is an
1041 # optimization here. We update the dirstate in place and strip
1041 # optimization here. We update the dirstate in place and strip
1042 # off the tip commit. Then just commit the current directory
1042 # off the tip commit. Then just commit the current directory
1043 # tree. We can also send repo.commit the list of files
1043 # tree. We can also send repo.commit the list of files
1044 # changed to speed up the diff
1044 # changed to speed up the diff
1045 #
1045 #
1046 # in short mode, we only diff the files included in the
1046 # in short mode, we only diff the files included in the
1047 # patch already
1047 # patch already
1048 #
1048 #
1049 # this should really read:
1049 # this should really read:
1050 # mm, dd, aa, aa2, uu = repo.status(tip, patchparent)[:5]
1050 # mm, dd, aa, aa2, uu = repo.status(tip, patchparent)[:5]
1051 # but we do it backwards to take advantage of manifest/chlog
1051 # but we do it backwards to take advantage of manifest/chlog
1052 # caching against the next repo.status call
1052 # caching against the next repo.status call
1053 #
1053 #
1054 mm, aa, dd, aa2, uu = repo.status(patchparent, tip)[:5]
1054 mm, aa, dd, aa2, uu = repo.status(patchparent, tip)[:5]
1055 changes = repo.changelog.read(tip)
1055 changes = repo.changelog.read(tip)
1056 man = repo.manifest.read(changes[0])
1056 man = repo.manifest.read(changes[0])
1057 aaa = aa[:]
1057 aaa = aa[:]
1058 if opts.get('short'):
1058 if opts.get('short'):
1059 filelist = mm + aa + dd
1059 filelist = mm + aa + dd
1060 match = dict.fromkeys(filelist).__contains__
1060 match = dict.fromkeys(filelist).__contains__
1061 else:
1061 else:
1062 filelist = None
1062 filelist = None
1063 match = util.always
1063 match = util.always
1064 m, a, r, d, u = repo.status(files=filelist, match=match)[:5]
1064 m, a, r, d, u = repo.status(files=filelist, match=match)[:5]
1065
1065
1066 # we might end up with files that were added between tip and
1066 # we might end up with files that were added between tip and
1067 # the dirstate parent, but then changed in the local dirstate.
1067 # the dirstate parent, but then changed in the local dirstate.
1068 # in this case, we want them to only show up in the added section
1068 # in this case, we want them to only show up in the added section
1069 for x in m:
1069 for x in m:
1070 if x not in aa:
1070 if x not in aa:
1071 mm.append(x)
1071 mm.append(x)
1072 # we might end up with files added by the local dirstate that
1072 # we might end up with files added by the local dirstate that
1073 # were deleted by the patch. In this case, they should only
1073 # were deleted by the patch. In this case, they should only
1074 # show up in the changed section.
1074 # show up in the changed section.
1075 for x in a:
1075 for x in a:
1076 if x in dd:
1076 if x in dd:
1077 del dd[dd.index(x)]
1077 del dd[dd.index(x)]
1078 mm.append(x)
1078 mm.append(x)
1079 else:
1079 else:
1080 aa.append(x)
1080 aa.append(x)
1081 # make sure any files deleted in the local dirstate
1081 # make sure any files deleted in the local dirstate
1082 # are not in the add or change column of the patch
1082 # are not in the add or change column of the patch
1083 forget = []
1083 forget = []
1084 for x in d + r:
1084 for x in d + r:
1085 if x in aa:
1085 if x in aa:
1086 del aa[aa.index(x)]
1086 del aa[aa.index(x)]
1087 forget.append(x)
1087 forget.append(x)
1088 continue
1088 continue
1089 elif x in mm:
1089 elif x in mm:
1090 del mm[mm.index(x)]
1090 del mm[mm.index(x)]
1091 dd.append(x)
1091 dd.append(x)
1092
1092
1093 m = util.unique(mm)
1093 m = util.unique(mm)
1094 r = util.unique(dd)
1094 r = util.unique(dd)
1095 a = util.unique(aa)
1095 a = util.unique(aa)
1096 c = [filter(matchfn, l) for l in (m, a, r, [], u)]
1096 c = [filter(matchfn, l) for l in (m, a, r, [], u)]
1097 filelist = util.unique(c[0] + c[1] + c[2])
1097 filelist = util.unique(c[0] + c[1] + c[2])
1098 patch.diff(repo, patchparent, files=filelist, match=matchfn,
1098 patch.diff(repo, patchparent, files=filelist, match=matchfn,
1099 fp=patchf, changes=c, opts=self.diffopts())
1099 fp=patchf, changes=c, opts=self.diffopts())
1100 patchf.close()
1100 patchf.close()
1101
1101
1102 repo.dirstate.setparents(*cparents)
1102 repo.dirstate.setparents(*cparents)
1103 copies = {}
1103 copies = {}
1104 for dst in a:
1104 for dst in a:
1105 src = repo.dirstate.copied(dst)
1105 src = repo.dirstate.copied(dst)
1106 if src is None:
1106 if src is None:
1107 continue
1107 continue
1108 copies.setdefault(src, []).append(dst)
1108 copies.setdefault(src, []).append(dst)
1109 repo.dirstate.update(a, 'a')
1109 repo.dirstate.update(a, 'a')
1110 # remember the copies between patchparent and tip
1110 # remember the copies between patchparent and tip
1111 # this may be slow, so don't do it if we're not tracking copies
1111 # this may be slow, so don't do it if we're not tracking copies
1112 if self.diffopts().git:
1112 if self.diffopts().git:
1113 for dst in aaa:
1113 for dst in aaa:
1114 f = repo.file(dst)
1114 f = repo.file(dst)
1115 src = f.renamed(man[dst])
1115 src = f.renamed(man[dst])
1116 if src:
1116 if src:
1117 copies[src[0]] = copies.get(dst, [])
1117 copies[src[0]] = copies.get(dst, [])
1118 if dst in a:
1118 if dst in a:
1119 copies[src[0]].append(dst)
1119 copies[src[0]].append(dst)
1120 # we can't copy a file created by the patch itself
1120 # we can't copy a file created by the patch itself
1121 if dst in copies:
1121 if dst in copies:
1122 del copies[dst]
1122 del copies[dst]
1123 for src, dsts in copies.iteritems():
1123 for src, dsts in copies.iteritems():
1124 for dst in dsts:
1124 for dst in dsts:
1125 repo.dirstate.copy(src, dst)
1125 repo.dirstate.copy(src, dst)
1126 repo.dirstate.update(r, 'r')
1126 repo.dirstate.update(r, 'r')
1127 # if the patch excludes a modified file, mark that file with mtime=0
1127 # if the patch excludes a modified file, mark that file with mtime=0
1128 # so status can see it.
1128 # so status can see it.
1129 mm = []
1129 mm = []
1130 for i in xrange(len(m)-1, -1, -1):
1130 for i in xrange(len(m)-1, -1, -1):
1131 if not matchfn(m[i]):
1131 if not matchfn(m[i]):
1132 mm.append(m[i])
1132 mm.append(m[i])
1133 del m[i]
1133 del m[i]
1134 repo.dirstate.update(m, 'n')
1134 repo.dirstate.update(m, 'n')
1135 repo.dirstate.update(mm, 'n', st_mtime=-1, st_size=-1)
1135 repo.dirstate.update(mm, 'n', st_mtime=-1, st_size=-1)
1136 repo.dirstate.forget(forget)
1136 repo.dirstate.forget(forget)
1137
1137
1138 if not msg:
1138 if not msg:
1139 if not message:
1139 if not message:
1140 message = "patch queue: %s\n" % patchfn
1140 message = "patch queue: %s\n" % patchfn
1141 else:
1141 else:
1142 message = "\n".join(message)
1142 message = "\n".join(message)
1143 else:
1143 else:
1144 message = msg
1144 message = msg
1145
1145
1146 self.strip(repo, top, update=False, backup='strip', wlock=wlock)
1146 self.strip(repo, top, update=False, backup='strip', wlock=wlock)
1147 n = repo.commit(filelist, message, changes[1], match=matchfn,
1147 n = repo.commit(filelist, message, changes[1], match=matchfn,
1148 force=1, wlock=wlock)
1148 force=1, wlock=wlock)
1149 self.applied[-1] = statusentry(revlog.hex(n), patchfn)
1149 self.applied[-1] = statusentry(revlog.hex(n), patchfn)
1150 self.applied_dirty = 1
1150 self.applied_dirty = 1
1151 self.removeundo(repo)
1151 self.removeundo(repo)
1152 else:
1152 else:
1153 self.printdiff(repo, patchparent, fp=patchf)
1153 self.printdiff(repo, patchparent, fp=patchf)
1154 patchf.close()
1154 patchf.close()
1155 added = repo.status()[1]
1155 added = repo.status()[1]
1156 for a in added:
1156 for a in added:
1157 f = repo.wjoin(a)
1157 f = repo.wjoin(a)
1158 try:
1158 try:
1159 os.unlink(f)
1159 os.unlink(f)
1160 except OSError, e:
1160 except OSError, e:
1161 if e.errno != errno.ENOENT:
1161 if e.errno != errno.ENOENT:
1162 raise
1162 raise
1163 try: os.removedirs(os.path.dirname(f))
1163 try: os.removedirs(os.path.dirname(f))
1164 except: pass
1164 except: pass
1165 # forget the file copies in the dirstate
1165 # forget the file copies in the dirstate
1166 # push should readd the files later on
1166 # push should readd the files later on
1167 repo.dirstate.forget(added)
1167 repo.dirstate.forget(added)
1168 self.pop(repo, force=True, wlock=wlock)
1168 self.pop(repo, force=True, wlock=wlock)
1169 self.push(repo, force=True, wlock=wlock)
1169 self.push(repo, force=True, wlock=wlock)
1170
1170
def init(self, repo, create=False):
    """Create the patch directory for this queue.

    With create=True, also create and return a nested (versioned)
    queue repository inside it via qrepo(); in that case an already
    existing directory is tolerated.  Without create, an existing
    patch directory is an error.
    """
    if not create and os.path.isdir(self.path):
        raise util.Abort(_("patch queue directory already exists"))
    try:
        os.mkdir(self.path)
    except OSError, inst:
        # EEXIST is acceptable only when we are about to create the
        # queue repository on top of the existing directory.
        if inst.errno != errno.EEXIST or not create:
            raise
    if create:
        return self.qrepo(create=True)
1181
1181
def unapplied(self, repo, patch=None):
    """Return [(index, name), ...] for pushable patches not yet applied.

    The scan starts just after *patch* when one is given (it must be in
    the series), otherwise at series_end().
    """
    if patch and patch not in self.series:
        raise util.Abort(_("patch %s is not in series file") % patch)
    if patch:
        first = self.series.index(patch) + 1
    else:
        first = self.series_end()
    result = []
    for idx in xrange(first, len(self.series)):
        ok, reason = self.pushable(idx)
        if ok:
            result.append((idx, self.series[idx]))
        # presumably reports why a guarded patch was skipped -- see
        # explain_pushable; called for every index, as in the original.
        self.explain_pushable(idx)
    return result
1196
1196
def qseries(self, repo, missing=None, start=0, length=None, status=None,
            summary=False):
    """Print the patch series, or (with missing) stray files.

    start/length bound the slice of the series that is shown.  With
    status, only patches whose one-letter state matches are printed,
    unless the ui is verbose, in which case every patch is printed
    with its index and state.  With summary, the first line of each
    patch's header is appended to its name.

    With missing, instead walk the patch directory and print files
    that are neither in the series nor one of the queue's own control
    files ('D' prefix when verbose).
    """
    def displayname(patchname):
        # Append ': <first header line>' when a summary was requested.
        if summary:
            msg = self.readheaders(patchname)[0]
            msg = msg and ': ' + msg[0] or ': '
        else:
            msg = ''
        return '%s%s' % (patchname, msg)

    # Set of applied patch names, for O(1) membership tests below.
    applied = dict.fromkeys([p.name for p in self.applied])
    if length is None:
        length = len(self.series) - start
    if not missing:
        for i in xrange(start, start+length):
            patch = self.series[i]
            # A = applied, U = unapplied but pushable,
            # G = not pushable (presumably guarded).
            if patch in applied:
                stat = 'A'
            elif self.pushable(i)[0]:
                stat = 'U'
            else:
                stat = 'G'
            pfx = ''
            if self.ui.verbose:
                pfx = '%d %s ' % (i, stat)
            elif status and status != stat:
                continue
            self.ui.write('%s%s\n' % (pfx, displayname(patch)))
    else:
        msng_list = []
        for root, dirs, files in os.walk(self.path):
            # Path of this directory relative to the patch directory.
            d = root[len(self.path) + 1:]
            for f in files:
                fl = os.path.join(d, f)
                if (fl not in self.series and
                    fl not in (self.status_path, self.series_path,
                               self.guards_path)
                    and not fl.startswith('.')):
                    msng_list.append(fl)
        msng_list.sort()
        for x in msng_list:
            pfx = self.ui.verbose and ('D ') or ''
            self.ui.write("%s%s\n" % (pfx, displayname(x)))
1240
1240
def issaveline(self, l):
    """Return True if status entry *l* is a qsave marker entry.

    save() records queue state under the reserved patch name
    '.hg.patches.save.line'; this recognizes such entries.

    Fix: the original returned True or fell through returning None;
    now always returns an explicit bool (truthiness-compatible with
    all existing callers).
    """
    return l.name == '.hg.patches.save.line'
1244
1244
def qrepo(self, create=False):
    """Return the repository wrapping the patch directory, or None.

    The patch directory only counts as a repository when it contains
    a .hg directory (i.e. qinit -c was used), or when we are creating
    one right now.
    """
    if not (create or os.path.isdir(self.join(".hg"))):
        return None
    return hg.repository(self.ui, path=self.path, create=create)
1248
1248
def restore(self, repo, rev, delete=None, qupdate=None):
    """Rebuild queue state from the save changeset *rev*.

    The description of a save changeset (written by save()) embeds the
    queue state after a 'Patch Data:' marker -- one statusentry line
    per patch, applied entries carrying a revision -- and optionally a
    'Dirstate: <p1> <p2>' line with the queue repository's parents.

    With delete, strip the save entry afterwards (unless it has
    children); with qupdate, update the queue repository to the
    recorded parent.  Returns 1 on failure.
    """
    c = repo.changelog.read(rev)
    desc = c[4].strip()
    lines = desc.splitlines()
    i = 0
    datastart = None
    series = []
    applied = []
    qpp = None
    for i in xrange(0, len(lines)):
        if lines[i] == 'Patch Data:':
            datastart = i + 1
        elif lines[i].startswith('Dirstate:'):
            # 'Dirstate: ' is 10 characters; the rest is the two
            # parent hashes separated by a space.
            l = lines[i].rstrip()
            l = l[10:].split(' ')
            qpp = [ hg.bin(x) for x in l ]
        elif datastart != None:
            # Past the marker: each line is a serialized statusentry.
            l = lines[i].rstrip()
            se = statusentry(l)
            file_ = se.name
            if se.rev:
                applied.append(se)
            else:
                series.append(file_)
    if datastart == None:
        self.ui.warn("No saved patch data found\n")
        return 1
    self.ui.warn("restoring status: %s\n" % lines[0])
    self.full_series = series
    self.applied = applied
    self.parse_series()
    self.series_dirty = 1
    self.applied_dirty = 1
    heads = repo.changelog.heads()
    if delete:
        if rev not in heads:
            self.ui.warn("save entry has children, leaving it alone\n")
        else:
            self.ui.warn("removing save entry %s\n" % hg.short(rev))
            pp = repo.dirstate.parents()
            # Only update the working directory if it sat on the
            # save entry we are about to strip.
            if rev in pp:
                update = True
            else:
                update = False
            self.strip(repo, rev, update=update, backup='strip')
    if qpp:
        self.ui.warn("saved queue repository parents: %s %s\n" %
                     (hg.short(qpp[0]), hg.short(qpp[1])))
        if qupdate:
            print "queue directory updating"
            r = self.qrepo()
            if not r:
                self.ui.warn("Unable to load queue repository\n")
                return 1
            hg.clean(r, qpp[0])
1304
1304
def save(self, repo, msg=None):
    """Commit a changeset that snapshots the current queue state.

    The series (prefixed ':') and the applied entries are serialized
    into the commit message after a 'Patch Data:' marker, so that
    restore() can rebuild the queue later.  The queue repository's
    dirstate parents, if any, go on a 'Dirstate:' line.  The new
    changeset is appended to self.applied under the reserved name
    '.hg.patches.save.line'.  Returns 1 on failure.
    """
    if len(self.applied) == 0:
        self.ui.warn("save: no patches applied, exiting\n")
        return 1
    if self.issaveline(self.applied[-1]):
        self.ui.warn("status is already saved\n")
        return 1

    # Unapplied series entries are marked with a leading ':'.
    ar = [ ':' + x for x in self.full_series ]
    if not msg:
        msg = "hg patches saved state"
    else:
        msg = "hg patches: " + msg.rstrip('\r\n')
    r = self.qrepo()
    if r:
        pp = r.dirstate.parents()
        msg += "\nDirstate: %s %s" % (hg.hex(pp[0]), hg.hex(pp[1]))
    msg += "\n\nPatch Data:\n"
    text = msg + "\n".join([str(x) for x in self.applied]) + '\n' + (ar and
               "\n".join(ar) + '\n' or "")
    n = repo.commit(None, text, user=None, force=1)
    if not n:
        self.ui.warn("repo commit failed\n")
        return 1
    self.applied.append(statusentry(revlog.hex(n),'.hg.patches.save.line'))
    self.applied_dirty = 1
    self.removeundo(repo)
1332
1332
def full_series_end(self):
    """Return the index in full_series just past the last applied patch.

    Returns 0 when nothing is applied, and the full series length when
    the last applied patch can no longer be found in the series file.
    """
    if not self.applied:
        return 0
    last = self.applied[-1].name
    pos = self.find_series(last)
    if pos is None:
        # Last applied patch is no longer in the series file.
        return len(self.full_series)
    return pos + 1
1341
1341
def series_end(self, all_patches=False):
    """If all_patches is False, return the index of the next pushable
    patch in the series, or the series length.  If all_patches is
    True, return the index of the first patch past the last applied
    one."""
    def first_pushable(idx):
        # With all_patches, any position counts; otherwise advance
        # past non-pushable patches, explaining each one skipped.
        if all_patches:
            return idx
        while idx < len(self.series):
            ok, reason = self.pushable(idx)
            if ok:
                break
            self.explain_pushable(idx)
            idx += 1
        return idx

    if not self.applied:
        return first_pushable(0)
    last = self.applied[-1].name
    try:
        pos = self.series.index(last)
    except ValueError:
        return 0
    return first_pushable(pos + 1)
1367
1367
def appliedname(self, index):
    """Return the display name of the applied patch at *index*.

    In verbose mode the name is prefixed with the patch's position in
    the series.
    """
    name = self.applied[index].name
    if self.ui.verbose:
        return "%d %s" % (self.series.index(name), name)
    return name
1375
1375
def qimport(self, repo, files, patchname=None, rev=None, existing=None,
            force=None, git=False):
    """Add patches to the series, either from *files* or from
    repository revisions (*rev*), which are mutually exclusive.

    Revisions are exported newest-first and inserted at the front of
    the series; they must form a linear path to qbase (when patches
    are applied) or to a head.  Files are appended after the last
    applied patch; '-' reads from stdin (requires patchname).  With
    existing, register patches already in the patch directory; with
    force, overwrite existing patch files; with git, export revisions
    in git diff format.
    """
    def checkseries(patchname):
        if patchname in self.series:
            raise util.Abort(_('patch %s is already in the series file')
                             % patchname)
    def checkfile(patchname):
        if not force and os.path.exists(self.join(patchname)):
            raise util.Abort(_('patch "%s" already exists')
                             % patchname)

    if rev:
        if files:
            raise util.Abort(_('option "-r" not valid when importing '
                               'files'))
        rev = cmdutil.revrange(repo, rev)
        # Process revisions from newest to oldest.
        rev.sort(lambda x, y: cmp(y, x))
    if (len(files) > 1 or len(rev) > 1) and patchname:
        raise util.Abort(_('option "-n" not valid when importing multiple '
                           'patches'))
    i = 0
    added = []
    if rev:
        # If mq patches are applied, we can only import revisions
        # that form a linear path to qbase.
        # Otherwise, they should form a linear path to a head.
        heads = repo.changelog.heads(repo.changelog.node(rev[-1]))
        if len(heads) > 1:
            raise util.Abort(_('revision %d is the root of more than one '
                               'branch') % rev[-1])
        if self.applied:
            base = revlog.hex(repo.changelog.node(rev[0]))
            if base in [n.rev for n in self.applied]:
                raise util.Abort(_('revision %d is already managed')
                                 % rev[0])
            if heads != [revlog.bin(self.applied[-1].rev)]:
                raise util.Abort(_('revision %d is not the parent of '
                                   'the queue') % rev[0])
            base = repo.changelog.rev(revlog.bin(self.applied[0].rev))
            lastparent = repo.changelog.parentrevs(base)[0]
        else:
            if heads != [repo.changelog.node(rev[0])]:
                raise util.Abort(_('revision %d has unmanaged children')
                                 % rev[0])
            lastparent = None

        if git:
            self.diffopts().git = True

        for r in rev:
            p1, p2 = repo.changelog.parentrevs(r)
            n = repo.changelog.node(r)
            if p2 != revlog.nullrev:
                raise util.Abort(_('cannot import merge revision %d') % r)
            # Enforce linearity: each revision must be the first
            # parent of the previously processed (newer) one.
            if lastparent and lastparent != r:
                raise util.Abort(_('revision %d is not the parent of %d')
                                 % (r, lastparent))
            lastparent = p1

            if not patchname:
                patchname = normname('%d.diff' % r)
            checkseries(patchname)
            checkfile(patchname)
            self.full_series.insert(0, patchname)

            patchf = self.opener(patchname, "w")
            patch.export(repo, [n], fp=patchf, opts=self.diffopts())
            patchf.close()

            se = statusentry(revlog.hex(n), patchname)
            self.applied.insert(0, se)

            added.append(patchname)
            # -n only names a single patch; subsequent ones are
            # auto-named from their revision number.
            patchname = None
        self.parse_series()
        self.applied_dirty = 1

    for filename in files:
        if existing:
            if filename == '-':
                raise util.Abort(_('-e is incompatible with import from -'))
            if not patchname:
                patchname = normname(filename)
            if not os.path.isfile(self.join(patchname)):
                raise util.Abort(_("patch %s does not exist") % patchname)
        else:
            try:
                if filename == '-':
                    if not patchname:
                        raise util.Abort(_('need --name to import a patch from -'))
                    text = sys.stdin.read()
                else:
                    text = file(filename).read()
            except IOError:
                raise util.Abort(_("unable to read %s") % patchname)
            if not patchname:
                patchname = normname(os.path.basename(filename))
            checkfile(patchname)
            patchf = self.opener(patchname, "w")
            patchf.write(text)
        checkseries(patchname)
        # Insert after the last applied patch, keeping the relative
        # order of the files being imported.
        index = self.full_series_end() + i
        self.full_series[index:index] = [patchname]
        self.parse_series()
        self.ui.warn("adding %s to series file\n" % patchname)
        i += 1
        added.append(patchname)
        patchname = None
    self.series_dirty = 1
    qrepo = self.qrepo()
    if qrepo:
        qrepo.add(added)
1488
1488
def delete(ui, repo, *patches, **opts):
    """remove patches from queue

    With --rev, mq will stop managing the named revisions. The
    patches must be applied and at the base of the stack. This option
    is useful when the patches have been applied upstream.

    Otherwise, the patches must not be applied.

    With --keep, the patch files are preserved in the patch directory."""
    # Thin command wrapper: delegate to the queue, then persist its
    # series/status files.
    mq = repo.mq
    mq.delete(repo, patches, opts)
    mq.save_dirty()
    return 0
1503
1503
def applied(ui, repo, patch=None, **opts):
    """print the patches already applied"""
    q = repo.mq
    if not patch:
        # No explicit patch: list everything up to the last applied.
        length = q.series_end(True)
    else:
        if patch not in q.series:
            raise util.Abort(_("patch %s is not in series file") % patch)
        length = q.series.index(patch) + 1
    return q.qseries(repo, length=length, status='A',
                     summary=opts.get('summary'))
1514
1514
def unapplied(ui, repo, patch=None, **opts):
    """print the patches not yet applied"""
    q = repo.mq
    if not patch:
        # No explicit patch: start just past the last applied one.
        first = q.series_end(True)
    else:
        if patch not in q.series:
            raise util.Abort(_("patch %s is not in series file") % patch)
        first = q.series.index(patch) + 1
    q.qseries(repo, start=first, status='U', summary=opts.get('summary'))
1525
1525
def qimport(ui, repo, *filename, **opts):
    """import a patch

    The patch will have the same name as its source file unless you
    give it a new one with --name.

    You can register an existing patch inside the patch directory
    with the --existing flag.

    With --force, an existing patch of the same name will be overwritten.

    An existing changeset may be placed under mq control with --rev
    (e.g. qimport --rev tip -n patch will place tip under mq control).
    With --git, patches imported with --rev will use the git diff
    format.
    """
    # Thin command wrapper around queue.qimport; persist the series
    # and status files afterwards.
    mq = repo.mq
    mq.qimport(repo, filename,
               patchname=opts['name'],
               rev=opts['rev'],
               existing=opts['existing'],
               force=opts['force'],
               git=opts['git'])
    mq.save_dirty()
    return 0
1548
1548
def init(ui, repo, **opts):
    """init a new queue repository

    The queue repository is unversioned by default. If -c is
    specified, qinit will create a separate nested repository
    for patches. Use qcommit to commit changes to this queue
    repository."""
    q = repo.mq
    r = q.init(repo, create=opts['create_repo'])
    q.save_dirty()
    if not r:
        return 0
    # A nested queue repository was created: seed it with an
    # .hgignore (so status/guards bookkeeping files stay untracked)
    # and an empty series file, and schedule both for commit.
    if not os.path.exists(r.wjoin('.hgignore')):
        fp = r.wopener('.hgignore', 'w')
        fp.write('syntax: glob\n')
        fp.write('status\n')
        fp.write('guards\n')
        fp.close()
    if not os.path.exists(r.wjoin('series')):
        r.wopener('series', 'w').close()
    r.add(['.hgignore', 'series'])
    commands.add(ui, r)
    return 0
1571
1571
def clone(ui, source, dest=None, **opts):
    '''clone main and patch repository at same time

    If source is local, destination will have no patches applied. If
    source is remote, this command can not check if patches are
    applied in source, so cannot guarantee that patches are not
    applied in destination. If you clone remote repository, be sure
    before that it has no patches applied.

    Source patch repository is looked for in <src>/.hg/patches by
    default. Use -p <url> to change.
    '''
    cmdutil.setremoteconfig(ui, opts)
    if dest is None:
        dest = hg.defaultdest(source)
    sr = hg.repository(ui, ui.expandpath(source))
    qbase, destrev = None, None
    if sr.local():
        if sr.mq.applied:
            # qbase is the first applied patch's changeset.
            qbase = revlog.bin(sr.mq.applied[0].rev)
            if not hg.islocal(dest):
                # Remote destination: we cannot strip afterwards, so
                # restrict the clone to heads not descended from qbase,
                # plus qbase's parent.
                heads = dict.fromkeys(sr.heads())
                for h in sr.heads(qbase):
                    del heads[h]
                destrev = heads.keys()
                destrev.append(sr.changelog.parents(qbase)[0])
    ui.note(_('cloning main repo\n'))
    sr, dr = hg.clone(ui, sr.url(), dest,
                      pull=opts['pull'],
                      rev=destrev,
                      update=False,
                      stream=opts['uncompressed'])
    ui.note(_('cloning patch repo\n'))
    spr, dpr = hg.clone(ui, opts['patches'] or (sr.url() + '/.hg/patches'),
                        dr.url() + '/.hg/patches',
                        pull=opts['pull'],
                        update=not opts['noupdate'],
                        stream=opts['uncompressed'])
    if dr.local():
        if qbase:
            # Local destination: drop the applied-patch changesets so
            # the clone starts with no patches applied.
            ui.note(_('stripping applied patches from destination repo\n'))
            dr.mq.strip(dr, qbase, update=False, backup=None)
        if not opts['noupdate']:
            ui.note(_('updating destination repo\n'))
            hg.update(dr, dr.changelog.tip())
1617
1617
def commit(ui, repo, *pats, **opts):
    """commit changes in the queue repository"""
    qrepo = repo.mq.qrepo()
    if not qrepo:
        raise util.Abort('no queue repository')
    # commit in the patch repository, not the main one
    commands.commit(qrepo.ui, qrepo, *pats, **opts)
1624
1624
def series(ui, repo, **opts):
    """print the entire series file"""
    q = repo.mq
    q.qseries(repo, missing=opts['missing'], summary=opts['summary'])
    return 0
1629
1629
def top(ui, repo, **opts):
    """print the name of the current patch"""
    q = repo.mq
    # index just past the last applied patch (0 when nothing is applied)
    if q.applied:
        t = q.series_end(True)
    else:
        t = 0
    if not t:
        ui.write("No patches applied\n")
        return 1
    return q.qseries(repo, start=t - 1, length=1, status='A',
                     summary=opts.get('summary'))
1640
1640
def next(ui, repo, **opts):
    """print the name of the next patch"""
    q = repo.mq
    pos = q.series_end()
    if pos != len(q.series):
        # there is at least one unapplied patch left
        return q.qseries(repo, start=pos, length=1,
                         summary=opts.get('summary'))
    ui.write("All patches applied\n")
    return 1
1649
1649
def prev(ui, repo, **opts):
    """print the name of the previous patch"""
    q = repo.mq
    napplied = len(q.applied)
    if napplied == 1:
        ui.write("Only one patch applied\n")
        return 1
    if not napplied:
        ui.write("No patches applied\n")
        return 1
    # the patch below the current top is at index len-2
    return q.qseries(repo, start=napplied - 2, length=1, status='A',
                     summary=opts.get('summary'))
1662
1662
def new(ui, repo, patch, **opts):
    """create a new patch

    qnew creates a new patch on top of the currently-applied patch
    (if any). It will refuse to run if there are any outstanding
    changes unless -f is specified, in which case the patch will
    be initialised with them.

    -e, -m or -l set the patch header as well as the commit message.
    If none is specified, the patch header is empty and the
    commit message is 'New patch: PATCH'"""
    q = repo.mq
    # message comes from -m/-l; optionally run through the editor
    message = cmdutil.logmessage(opts)
    if opts['edit']:
        message = ui.edit(message, ui.username())
    q.new(repo, patch, msg=message, force=opts['force'])
    q.save_dirty()
    return 0
1681
1681
def refresh(ui, repo, *pats, **opts):
    """update the current patch

    If any file patterns are provided, the refreshed patch will contain only
    the modifications that match those patterns; the remaining modifications
    will remain in the working directory.

    hg add/remove/copy/rename work as usual, though you might want to use
    git-style patches (--git or [diff] git=1) to track copies and renames.
    """
    q = repo.mq
    message = cmdutil.logmessage(opts)
    if opts['edit']:
        if message:
            raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
        # seed the editor with the current top patch's header
        toppatch = q.applied[-1].name
        (message, comment, user, date, hasdiff) = q.readheaders(toppatch)
        message = ui.edit('\n'.join(message), user or ui.username())
    ret = q.refresh(repo, pats, msg=message, **opts)
    q.save_dirty()
    return ret
1703
1703
def diff(ui, repo, *pats, **opts):
    """diff of the current patch"""
    repo.mq.diff(repo, pats, opts)
    return 0
1708
1708
def fold(ui, repo, *files, **opts):
    """fold the named patches into the current patch

    Patches must not yet be applied. Each patch will be successively
    applied to the current patch in the order given. If all the
    patches apply successfully, the current patch will be refreshed
    with the new cumulative patch, and the folded patches will
    be deleted. With -k/--keep, the folded patch files will not
    be removed afterwards.

    The header for each folded patch will be concatenated with
    the current patch header, separated by a line of '* * *'."""

    q = repo.mq

    if not files:
        raise util.Abort(_('qfold requires at least one patch name'))
    if not q.check_toppatch(repo):
        raise util.Abort(_('No patches applied'))

    message = cmdutil.logmessage(opts)
    if opts['edit']:
        if message:
            raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))

    parent = q.lookup('qtip')
    patches = []
    messages = []
    # resolve and sanity-check the requested patches up front
    for f in files:
        p = q.lookup(f)
        if p in patches or p == parent:
            # fix: warning previously lacked its terminating newline
            ui.warn(_('Skipping already folded patch %s\n') % p)
        if q.isapplied(p):
            raise util.Abort(_('qfold cannot fold already applied patch %s') % p)
        patches.append(p)

    # apply each patch onto the working directory in order
    for p in patches:
        if not message:
            messages.append(q.readheaders(p)[0])
        pf = q.join(p)
        (patchsuccess, files, fuzz) = q.patch(repo, pf)
        if not patchsuccess:
            raise util.Abort(_('Error folding patch %s') % p)
        patch.updatedir(ui, repo, files)

    if not message:
        # concatenate the headers, separated by '* * *' lines
        message, comments, user = q.readheaders(parent)[0:3]
        for msg in messages:
            message.append('* * *')
            message.extend(msg)
        message = '\n'.join(message)

    if opts['edit']:
        message = ui.edit(message, user or ui.username())

    q.refresh(repo, msg=message)
    q.delete(repo, patches, opts)
    q.save_dirty()
1767
1767
def goto(ui, repo, patch, **opts):
    '''push or pop patches until named patch is at top of stack'''
    q = repo.mq
    patch = q.lookup(patch)
    force = opts['force']
    if q.isapplied(patch):
        # target is already applied: pop back down to it
        ret = q.pop(repo, patch, force=force)
    else:
        # target is unapplied: push up to it
        ret = q.push(repo, patch, force=force)
    q.save_dirty()
    return ret
1778
1778
def guard(ui, repo, *args, **opts):
    '''set or print guards for a patch

    Guards control whether a patch can be pushed. A patch with no
    guards is always pushed. A patch with a positive guard ("+foo") is
    pushed only if the qselect command has activated it. A patch with
    a negative guard ("-foo") is never pushed if the qselect command
    has activated it.

    With no arguments, print the currently active guards.
    With arguments, set guards for the named patch.

    To set a negative guard "-foo" on topmost patch ("--" is needed so
    hg will not interpret "-foo" as an option):
      hg qguard -- -foo

    To set guards on another patch:
      hg qguard other.patch +2.6.17 -stable
    '''
    def status(idx):
        # print "patchname: guard guard ..." for series entry idx
        guards = q.series_guards[idx] or ['unguarded']
        ui.write('%s: %s\n' % (q.series[idx], ' '.join(guards)))
    q = repo.mq
    patch = None
    args = list(args)
    if opts['list']:
        # -l/--list: dump the guards of every patch in the series
        if args or opts['none']:
            raise util.Abort(_('cannot mix -l/--list with options or arguments'))
        for i in xrange(len(q.series)):
            status(i)
        return
    if not args or args[0][0:1] in '-+':
        # no explicit patch name given (first arg is a guard spec):
        # operate on the topmost applied patch
        if not q.applied:
            raise util.Abort(_('no patches applied'))
        patch = q.applied[-1].name
    if patch is None and args[0][0:1] not in '-+':
        # first argument is the patch name; the rest are guard specs
        patch = args.pop(0)
    if patch is None:
        raise util.Abort(_('no patch to work with'))
    if args or opts['none']:
        # set guards (empty list with --none clears them)
        idx = q.find_series(patch)
        if idx is None:
            raise util.Abort(_('no patch named %s') % patch)
        q.set_guards(idx, args)
        q.save_dirty()
    else:
        # no guard specs: just report the chosen patch's guards
        status(q.series.index(q.lookup(patch)))
1826
1826
def header(ui, repo, patch=None):
    """Print the header of the topmost or specified patch"""
    q = repo.mq

    if patch:
        patch = q.lookup(patch)
    else:
        # default to the topmost applied patch
        if not q.applied:
            ui.write('No patches applied\n')
            return 1
        patch = q.lookup('qtip')
    # readheaders()[0] is the list of message lines
    message = q.readheaders(patch)[0]

    ui.write('\n'.join(message) + '\n')
1841
1841
def lastsavename(path):
    """Return (filename, index) of the highest-numbered save of path.

    Saved queues live next to path and are named "<base>.<N>" for an
    integer N. Returns (None, None) when no saved queue exists.
    """
    (directory, base) = os.path.split(path)
    names = os.listdir(directory)
    # fix: escape base and anchor the dot literally, so a name like
    # "patchesX3" no longer matches "patches.<N>"
    namere = re.compile(r"%s\.([0-9]+)" % re.escape(base))
    maxindex = None
    maxname = None
    for f in names:
        m = namere.match(f)
        if m:
            index = int(m.group(1))
            # fix: identity comparison for the None sentinel
            if maxindex is None or index > maxindex:
                maxindex = index
                maxname = f
    if maxname:
        return (os.path.join(directory, maxname), maxindex)
    return (None, None)
1858
1858
def savename(path):
    """Return the next unused save name, "<path>.<N+1>"."""
    last, index = lastsavename(path)
    if last is None:
        # no previous save: start numbering at 1
        index = 0
    return path + ".%d" % (index + 1)
1865
1865
def push(ui, repo, patch=None, **opts):
    """push the next patch onto the stack"""
    q = repo.mq
    mergeq = None

    if opts['all']:
        # --all: push everything, i.e. up to the last series entry
        if not q.series:
            ui.warn(_('no patches in series\n'))
            return 0
        patch = q.series[-1]
    if opts['merge']:
        # merge against a saved queue, either named or the latest save
        if opts['name']:
            newpath = opts['name']
        else:
            newpath, i = lastsavename(q.path)
        if not newpath:
            ui.warn("no saved queues found, please use -n\n")
            return 1
        mergeq = queue(ui, repo.join(""), newpath)
        ui.warn("merging with queue at: %s\n" % mergeq.path)
    return q.push(repo, patch, force=opts['force'], list=opts['list'],
                  mergeq=mergeq)
1889
1889
def pop(ui, repo, patch=None, **opts):
    """pop the current patch off the stack"""
    localupdate = True
    if opts['name']:
        # pop from a named (saved) queue; it must not touch the working dir
        q = queue(ui, repo.join(""), repo.join(opts['name']))
        ui.warn('using patch queue: %s\n' % q.path)
        localupdate = False
    else:
        q = repo.mq
    ret = q.pop(repo, patch, force=opts['force'], update=localupdate,
                all=opts['all'])
    q.save_dirty()
    return ret
1903
1903
def rename(ui, repo, patch, name=None, **opts):
    """rename a patch

    With one argument, renames the current patch to PATCH1.
    With two arguments, renames PATCH1 to PATCH2."""

    q = repo.mq

    if not name:
        # single-argument form: the argument is the NEW name,
        # and the patch being renamed is the current top
        name = patch
        patch = None

    if patch:
        patch = q.lookup(patch)
    else:
        if not q.applied:
            ui.write(_('No patches applied\n'))
            return
        patch = q.lookup('qtip')
    absdest = q.join(name)
    if os.path.isdir(absdest):
        # renaming into a directory: keep the original basename
        name = normname(os.path.join(name, os.path.basename(patch)))
        absdest = q.join(name)
    if os.path.exists(absdest):
        raise util.Abort(_('%s already exists') % absdest)

    if name in q.series:
        raise util.Abort(_('A patch named %s already exists in the series file') % name)

    if ui.verbose:
        ui.write('Renaming %s to %s\n' % (patch, name))
    # rewrite the series entry, preserving any "#guard" annotations
    i = q.find_series(patch)
    guards = q.guard_re.findall(q.full_series[i])
    q.full_series[i] = name + ''.join([' #' + g for g in guards])
    q.parse_series()
    q.series_dirty = 1

    # if the patch is applied, its status entry must track the new name
    info = q.isapplied(patch)
    if info:
        q.applied[info[0]] = statusentry(info[1], name)
        q.applied_dirty = 1

    util.rename(q.join(patch), absdest)
    r = q.qrepo()
    if r:
        # mirror the rename in the versioned patch repository, if any
        wlock = r.wlock()
        if r.dirstate.state(name) == 'r':
            # destination was scheduled for removal: resurrect it first
            r.undelete([name], wlock)
        r.copy(patch, name, wlock)
        r.remove([patch], False, wlock)

    q.save_dirty()
1956
1956
def restore(ui, repo, rev, **opts):
    """restore the queue state saved by a rev"""
    rev = repo.lookup(rev)
    q = repo.mq
    q.restore(repo, rev, delete=opts['delete'], qupdate=opts['update'])
    q.save_dirty()
    return 0
1965
1965
def save(ui, repo, **opts):
    """save current queue state"""
    q = repo.mq
    message = cmdutil.logmessage(opts)
    ret = q.save(repo, msg=message)
    if ret:
        return ret
    q.save_dirty()
    if opts['copy']:
        # copy the whole patch directory to a save name
        path = q.path
        if opts['name']:
            newpath = os.path.join(q.basepath, opts['name'])
            if os.path.exists(newpath):
                if not os.path.isdir(newpath):
                    raise util.Abort(_('destination %s exists and is not '
                                       'a directory') % newpath)
                if not opts['force']:
                    raise util.Abort(_('destination %s exists, '
                                       'use -f to force') % newpath)
        else:
            newpath = savename(path)
        ui.warn("copy %s to %s\n" % (path, newpath))
        util.copyfiles(path, newpath)
    if opts['empty']:
        try:
            os.unlink(q.join(q.status_path))
        # fix: was a bare except, which also swallowed KeyboardInterrupt;
        # only a missing/inaccessible status file is a best-effort case
        except OSError:
            pass
    return 0
1995
1995
def strip(ui, repo, rev, **opts):
    """strip a revision and all later revs on the same branch"""
    rev = repo.lookup(rev)
    # default is to back up everything; --backup/--nobackup narrow that
    if opts['backup']:
        backup = 'strip'
    elif opts['nobackup']:
        backup = 'none'
    else:
        backup = 'all'
    # only update the working dir if it is not already at the null rev
    update = repo.dirstate.parents()[0] != revlog.nullid
    repo.mq.strip(repo, rev, backup=backup, update=update)
    return 0
2007
2007
def select(ui, repo, *args, **opts):
    '''set or print guarded patches to push

    Use the qguard command to set or print guards on patch, then use
    qselect to tell mq which guards to use. A patch will be pushed if it
    has no guards or any positive guards match the currently selected guard,
    but will not be pushed if any negative guards match the current guard.
    For example:

        qguard foo.patch -stable    (negative guard)
        qguard bar.patch +stable    (positive guard)
        qselect stable

    This activates the "stable" guard. mq will skip foo.patch (because
    it has a negative match) but push bar.patch (because it
    has a positive match).

    With no arguments, prints the currently active guards.
    With one argument, sets the active guard.

    Use -n/--none to deactivate guards (no other arguments needed).
    When no guards are active, patches with positive guards are skipped
    and patches with negative guards are pushed.

    qselect can change the guards on applied patches. It does not pop
    guarded patches by default. Use --pop to pop back to the last applied
    patch that is not guarded. Use --reapply (which implies --pop) to push
    back to the current patch afterwards, but skip guarded patches.

    Use -s/--series to print a list of all guards in the series file (no
    other arguments needed). Use -v for more information.'''

    q = repo.mq
    guards = q.active()
    if args or opts['none']:
        # change the active guard set; snapshot the old pushable state
        # first so we can report what the change affected
        old_unapplied = q.unapplied(repo)
        old_guarded = [i for i in xrange(len(q.applied)) if
                       not q.pushable(i)[0]]
        q.set_active(args)
        q.save_dirty()
        if not args:
            ui.status(_('guards deactivated\n'))
        if not opts['pop'] and not opts['reapply']:
            # not popping: just summarize how pushability changed
            unapplied = q.unapplied(repo)
            guarded = [i for i in xrange(len(q.applied))
                       if not q.pushable(i)[0]]
            if len(unapplied) != len(old_unapplied):
                ui.status(_('number of unguarded, unapplied patches has '
                            'changed from %d to %d\n') %
                          (len(old_unapplied), len(unapplied)))
            if len(guarded) != len(old_guarded):
                ui.status(_('number of guarded, applied patches has changed '
                            'from %d to %d\n') %
                          (len(old_guarded), len(guarded)))
    elif opts['series']:
        # -s/--series: tally how often each guard occurs in the series
        guards = {}
        noguards = 0
        for gs in q.series_guards:
            if not gs:
                noguards += 1
            for g in gs:
                guards.setdefault(g, 0)
                guards[g] += 1
        if ui.verbose:
            guards['NONE'] = noguards
        guards = guards.items()
        # sort by guard name, ignoring the leading +/- sign
        guards.sort(lambda a, b: cmp(a[0][1:], b[0][1:]))
        if guards:
            ui.note(_('guards in series file:\n'))
            for guard, count in guards:
                ui.note('%2d ' % count)
                ui.write(guard, '\n')
        else:
            ui.note(_('no guards in series file\n'))
    else:
        # no arguments: report the active guards
        if guards:
            ui.note(_('active guards:\n'))
            for g in guards:
                ui.write(g, '\n')
        else:
            ui.write(_('no active guards\n'))
    # remember the current top so --reapply can push back to it later
    reapply = opts['reapply'] and q.applied and q.appliedname(-1)
    popped = False
    if opts['pop'] or opts['reapply']:
        # pop down to just below the first applied patch that is now guarded
        for i in xrange(len(q.applied)):
            pushable, reason = q.pushable(i)
            if not pushable:
                ui.status(_('popping guarded patches\n'))
                popped = True
                if i == 0:
                    q.pop(repo, all=True)
                else:
                    q.pop(repo, i-1)
                break
    if popped:
        try:
            if reapply:
                ui.status(_('reapplying unguarded patches\n'))
                q.push(repo, reapply)
        finally:
            # the pop (and possible push) changed state; always persist it
            q.save_dirty()
2109
2109
2110 def reposetup(ui, repo):
2110 def reposetup(ui, repo):
2111 class mqrepo(repo.__class__):
2111 class mqrepo(repo.__class__):
2112 def abort_if_wdir_patched(self, errmsg, force=False):
2112 def abort_if_wdir_patched(self, errmsg, force=False):
2113 if self.mq.applied and not force:
2113 if self.mq.applied and not force:
2114 parent = revlog.hex(self.dirstate.parents()[0])
2114 parent = revlog.hex(self.dirstate.parents()[0])
2115 if parent in [s.rev for s in self.mq.applied]:
2115 if parent in [s.rev for s in self.mq.applied]:
2116 raise util.Abort(errmsg)
2116 raise util.Abort(errmsg)
2117
2117
2118 def commit(self, *args, **opts):
2118 def commit(self, *args, **opts):
2119 if len(args) >= 6:
2119 if len(args) >= 6:
2120 force = args[5]
2120 force = args[5]
2121 else:
2121 else:
2122 force = opts.get('force')
2122 force = opts.get('force')
2123 self.abort_if_wdir_patched(
2123 self.abort_if_wdir_patched(
2124 _('cannot commit over an applied mq patch'),
2124 _('cannot commit over an applied mq patch'),
2125 force)
2125 force)
2126
2126
2127 return super(mqrepo, self).commit(*args, **opts)
2127 return super(mqrepo, self).commit(*args, **opts)
2128
2128
2129 def push(self, remote, force=False, revs=None):
2129 def push(self, remote, force=False, revs=None):
2130 if self.mq.applied and not force and not revs:
2130 if self.mq.applied and not force and not revs:
2131 raise util.Abort(_('source has mq patches applied'))
2131 raise util.Abort(_('source has mq patches applied'))
2132 return super(mqrepo, self).push(remote, force, revs)
2132 return super(mqrepo, self).push(remote, force, revs)
2133
2133
2134 def tags(self):
2134 def tags(self):
2135 if self.tagscache:
2135 if self.tagscache:
2136 return self.tagscache
2136 return self.tagscache
2137
2137
2138 tagscache = super(mqrepo, self).tags()
2138 tagscache = super(mqrepo, self).tags()
2139
2139
2140 q = self.mq
2140 q = self.mq
2141 if not q.applied:
2141 if not q.applied:
2142 return tagscache
2142 return tagscache
2143
2143
2144 mqtags = [(revlog.bin(patch.rev), patch.name) for patch in q.applied]
2144 mqtags = [(revlog.bin(patch.rev), patch.name) for patch in q.applied]
2145 mqtags.append((mqtags[-1][0], 'qtip'))
2145 mqtags.append((mqtags[-1][0], 'qtip'))
2146 mqtags.append((mqtags[0][0], 'qbase'))
2146 mqtags.append((mqtags[0][0], 'qbase'))
2147 mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent'))
2147 mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent'))
2148 for patch in mqtags:
2148 for patch in mqtags:
2149 if patch[1] in tagscache:
2149 if patch[1] in tagscache:
2150 self.ui.warn('Tag %s overrides mq patch of the same name\n' % patch[1])
2150 self.ui.warn('Tag %s overrides mq patch of the same name\n' % patch[1])
2151 else:
2151 else:
2152 tagscache[patch[1]] = patch[0]
2152 tagscache[patch[1]] = patch[0]
2153
2153
2154 return tagscache
2154 return tagscache
2155
2155
2156 def _branchtags(self):
2156 def _branchtags(self):
2157 q = self.mq
2157 q = self.mq
2158 if not q.applied:
2158 if not q.applied:
2159 return super(mqrepo, self)._branchtags()
2159 return super(mqrepo, self)._branchtags()
2160
2160
2161 self.branchcache = {} # avoid recursion in changectx
2161 self.branchcache = {} # avoid recursion in changectx
2162 cl = self.changelog
2162 cl = self.changelog
2163 partial, last, lrev = self._readbranchcache()
2163 partial, last, lrev = self._readbranchcache()
2164
2164
2165 qbase = cl.rev(revlog.bin(q.applied[0].rev))
2165 qbase = cl.rev(revlog.bin(q.applied[0].rev))
2166 start = lrev + 1
2166 start = lrev + 1
2167 if start < qbase:
2167 if start < qbase:
2168 # update the cache (excluding the patches) and save it
2168 # update the cache (excluding the patches) and save it
2169 self._updatebranchcache(partial, lrev+1, qbase)
2169 self._updatebranchcache(partial, lrev+1, qbase)
2170 self._writebranchcache(partial, cl.node(qbase-1), qbase-1)
2170 self._writebranchcache(partial, cl.node(qbase-1), qbase-1)
2171 start = qbase
2171 start = qbase
2172 # if start = qbase, the cache is as updated as it should be.
2172 # if start = qbase, the cache is as updated as it should be.
2173 # if start > qbase, the cache includes (part of) the patches.
2173 # if start > qbase, the cache includes (part of) the patches.
2174 # we might as well use it, but we won't save it.
2174 # we might as well use it, but we won't save it.
2175
2175
2176 # update the cache up to the tip
2176 # update the cache up to the tip
2177 self._updatebranchcache(partial, start, cl.count())
2177 self._updatebranchcache(partial, start, cl.count())
2178
2178
2179 return partial
2179 return partial
2180
2180
2181 if repo.local():
2181 if repo.local():
2182 repo.__class__ = mqrepo
2182 repo.__class__ = mqrepo
2183 repo.mq = queue(ui, repo.join(""))
2183 repo.mq = queue(ui, repo.join(""))
2184
2184
2185 seriesopts = [('s', 'summary', None, _('print first line of patch header'))]
2185 seriesopts = [('s', 'summary', None, _('print first line of patch header'))]
2186
2186
2187 cmdtable = {
2187 cmdtable = {
2188 "qapplied": (applied, [] + seriesopts, 'hg qapplied [-s] [PATCH]'),
2188 "qapplied": (applied, [] + seriesopts, 'hg qapplied [-s] [PATCH]'),
2189 "qclone": (clone,
2189 "qclone": (clone,
2190 [('', 'pull', None, _('use pull protocol to copy metadata')),
2190 [('', 'pull', None, _('use pull protocol to copy metadata')),
2191 ('U', 'noupdate', None, _('do not update the new working directories')),
2191 ('U', 'noupdate', None, _('do not update the new working directories')),
2192 ('', 'uncompressed', None,
2192 ('', 'uncompressed', None,
2193 _('use uncompressed transfer (fast over LAN)')),
2193 _('use uncompressed transfer (fast over LAN)')),
2194 ('e', 'ssh', '', _('specify ssh command to use')),
2194 ('e', 'ssh', '', _('specify ssh command to use')),
2195 ('p', 'patches', '', _('location of source patch repo')),
2195 ('p', 'patches', '', _('location of source patch repo')),
2196 ('', 'remotecmd', '',
2196 ('', 'remotecmd', '',
2197 _('specify hg command to run on the remote side'))],
2197 _('specify hg command to run on the remote side'))],
2198 'hg qclone [OPTION]... SOURCE [DEST]'),
2198 'hg qclone [OPTION]... SOURCE [DEST]'),
2199 "qcommit|qci":
2199 "qcommit|qci":
2200 (commit,
2200 (commit,
2201 commands.table["^commit|ci"][1],
2201 commands.table["^commit|ci"][1],
2202 'hg qcommit [OPTION]... [FILE]...'),
2202 'hg qcommit [OPTION]... [FILE]...'),
2203 "^qdiff": (diff,
2203 "^qdiff": (diff,
2204 [('g', 'git', None, _('use git extended diff format')),
2204 [('g', 'git', None, _('use git extended diff format')),
2205 ('I', 'include', [], _('include names matching the given patterns')),
2205 ('I', 'include', [], _('include names matching the given patterns')),
2206 ('X', 'exclude', [], _('exclude names matching the given patterns'))],
2206 ('X', 'exclude', [], _('exclude names matching the given patterns'))],
2207 'hg qdiff [-I] [-X] [FILE]...'),
2207 'hg qdiff [-I] [-X] [FILE]...'),
2208 "qdelete|qremove|qrm":
2208 "qdelete|qremove|qrm":
2209 (delete,
2209 (delete,
2210 [('k', 'keep', None, _('keep patch file')),
2210 [('k', 'keep', None, _('keep patch file')),
2211 ('r', 'rev', [], _('stop managing a revision'))],
2211 ('r', 'rev', [], _('stop managing a revision'))],
2212 'hg qdelete [-k] [-r REV]... PATCH...'),
2212 'hg qdelete [-k] [-r REV]... PATCH...'),
2213 'qfold':
2213 'qfold':
2214 (fold,
2214 (fold,
2215 [('e', 'edit', None, _('edit patch header')),
2215 [('e', 'edit', None, _('edit patch header')),
2216 ('k', 'keep', None, _('keep folded patch files'))
2216 ('k', 'keep', None, _('keep folded patch files'))
2217 ] + commands.commitopts,
2217 ] + commands.commitopts,
2218 'hg qfold [-e] [-m <text>] [-l <file] PATCH...'),
2218 'hg qfold [-e] [-m <text>] [-l <file] PATCH...'),
2219 'qgoto': (goto, [('f', 'force', None, _('overwrite any local changes'))],
2219 'qgoto': (goto, [('f', 'force', None, _('overwrite any local changes'))],
2220 'hg qgoto [OPT]... PATCH'),
2220 'hg qgoto [OPT]... PATCH'),
2221 'qguard': (guard, [('l', 'list', None, _('list all patches and guards')),
2221 'qguard': (guard, [('l', 'list', None, _('list all patches and guards')),
2222 ('n', 'none', None, _('drop all guards'))],
2222 ('n', 'none', None, _('drop all guards'))],
2223 'hg qguard [PATCH] [+GUARD]... [-GUARD]...'),
2223 'hg qguard [PATCH] [+GUARD]... [-GUARD]...'),
2224 'qheader': (header, [],
2224 'qheader': (header, [],
2225 _('hg qheader [PATCH]')),
2225 _('hg qheader [PATCH]')),
2226 "^qimport":
2226 "^qimport":
2227 (qimport,
2227 (qimport,
2228 [('e', 'existing', None, 'import file in patch dir'),
2228 [('e', 'existing', None, 'import file in patch dir'),
2229 ('n', 'name', '', 'patch file name'),
2229 ('n', 'name', '', 'patch file name'),
2230 ('f', 'force', None, 'overwrite existing files'),
2230 ('f', 'force', None, 'overwrite existing files'),
2231 ('r', 'rev', [], 'place existing revisions under mq control'),
2231 ('r', 'rev', [], 'place existing revisions under mq control'),
2232 ('g', 'git', None, _('use git extended diff format'))],
2232 ('g', 'git', None, _('use git extended diff format'))],
2233 'hg qimport [-e] [-n NAME] [-f] [-g] [-r REV]... FILE...'),
2233 'hg qimport [-e] [-n NAME] [-f] [-g] [-r REV]... FILE...'),
2234 "^qinit":
2234 "^qinit":
2235 (init,
2235 (init,
2236 [('c', 'create-repo', None, 'create queue repository')],
2236 [('c', 'create-repo', None, 'create queue repository')],
2237 'hg qinit [-c]'),
2237 'hg qinit [-c]'),
2238 "qnew":
2238 "qnew":
2239 (new,
2239 (new,
2240 [('e', 'edit', None, _('edit commit message')),
2240 [('e', 'edit', None, _('edit commit message')),
2241 ('f', 'force', None, _('import uncommitted changes into patch'))
2241 ('f', 'force', None, _('import uncommitted changes into patch'))
2242 ] + commands.commitopts,
2242 ] + commands.commitopts,
2243 'hg qnew [-e] [-m TEXT] [-l FILE] [-f] PATCH'),
2243 'hg qnew [-e] [-m TEXT] [-l FILE] [-f] PATCH'),
2244 "qnext": (next, [] + seriesopts, 'hg qnext [-s]'),
2244 "qnext": (next, [] + seriesopts, 'hg qnext [-s]'),
2245 "qprev": (prev, [] + seriesopts, 'hg qprev [-s]'),
2245 "qprev": (prev, [] + seriesopts, 'hg qprev [-s]'),
2246 "^qpop":
2246 "^qpop":
2247 (pop,
2247 (pop,
2248 [('a', 'all', None, 'pop all patches'),
2248 [('a', 'all', None, 'pop all patches'),
2249 ('n', 'name', '', 'queue name to pop'),
2249 ('n', 'name', '', 'queue name to pop'),
2250 ('f', 'force', None, 'forget any local changes')],
2250 ('f', 'force', None, 'forget any local changes')],
2251 'hg qpop [-a] [-n NAME] [-f] [PATCH | INDEX]'),
2251 'hg qpop [-a] [-n NAME] [-f] [PATCH | INDEX]'),
2252 "^qpush":
2252 "^qpush":
2253 (push,
2253 (push,
2254 [('f', 'force', None, 'apply if the patch has rejects'),
2254 [('f', 'force', None, 'apply if the patch has rejects'),
2255 ('l', 'list', None, 'list patch name in commit text'),
2255 ('l', 'list', None, 'list patch name in commit text'),
2256 ('a', 'all', None, 'apply all patches'),
2256 ('a', 'all', None, 'apply all patches'),
2257 ('m', 'merge', None, 'merge from another queue'),
2257 ('m', 'merge', None, 'merge from another queue'),
2258 ('n', 'name', '', 'merge queue name')],
2258 ('n', 'name', '', 'merge queue name')],
2259 'hg qpush [-f] [-l] [-a] [-m] [-n NAME] [PATCH | INDEX]'),
2259 'hg qpush [-f] [-l] [-a] [-m] [-n NAME] [PATCH | INDEX]'),
2260 "^qrefresh":
2260 "^qrefresh":
2261 (refresh,
2261 (refresh,
2262 [('e', 'edit', None, _('edit commit message')),
2262 [('e', 'edit', None, _('edit commit message')),
2263 ('g', 'git', None, _('use git extended diff format')),
2263 ('g', 'git', None, _('use git extended diff format')),
2264 ('s', 'short', None, 'refresh only files already in the patch'),
2264 ('s', 'short', None, 'refresh only files already in the patch'),
2265 ('I', 'include', [], _('include names matching the given patterns')),
2265 ('I', 'include', [], _('include names matching the given patterns')),
2266 ('X', 'exclude', [], _('exclude names matching the given patterns'))
2266 ('X', 'exclude', [], _('exclude names matching the given patterns'))
2267 ] + commands.commitopts,
2267 ] + commands.commitopts,
2268 'hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...'),
2268 'hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...'),
2269 'qrename|qmv':
2269 'qrename|qmv':
2270 (rename, [], 'hg qrename PATCH1 [PATCH2]'),
2270 (rename, [], 'hg qrename PATCH1 [PATCH2]'),
2271 "qrestore":
2271 "qrestore":
2272 (restore,
2272 (restore,
2273 [('d', 'delete', None, 'delete save entry'),
2273 [('d', 'delete', None, 'delete save entry'),
2274 ('u', 'update', None, 'update queue working dir')],
2274 ('u', 'update', None, 'update queue working dir')],
2275 'hg qrestore [-d] [-u] REV'),
2275 'hg qrestore [-d] [-u] REV'),
2276 "qsave":
2276 "qsave":
2277 (save,
2277 (save,
2278 [('c', 'copy', None, 'copy patch directory'),
2278 [('c', 'copy', None, 'copy patch directory'),
2279 ('n', 'name', '', 'copy directory name'),
2279 ('n', 'name', '', 'copy directory name'),
2280 ('e', 'empty', None, 'clear queue status file'),
2280 ('e', 'empty', None, 'clear queue status file'),
2281 ('f', 'force', None, 'force copy')] + commands.commitopts,
2281 ('f', 'force', None, 'force copy')] + commands.commitopts,
2282 'hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]'),
2282 'hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]'),
2283 "qselect": (select,
2283 "qselect": (select,
2284 [('n', 'none', None, _('disable all guards')),
2284 [('n', 'none', None, _('disable all guards')),
2285 ('s', 'series', None, _('list all guards in series file')),
2285 ('s', 'series', None, _('list all guards in series file')),
2286 ('', 'pop', None,
2286 ('', 'pop', None,
2287 _('pop to before first guarded applied patch')),
2287 _('pop to before first guarded applied patch')),
2288 ('', 'reapply', None, _('pop, then reapply patches'))],
2288 ('', 'reapply', None, _('pop, then reapply patches'))],
2289 'hg qselect [OPTION]... [GUARD]...'),
2289 'hg qselect [OPTION]... [GUARD]...'),
2290 "qseries":
2290 "qseries":
2291 (series,
2291 (series,
2292 [('m', 'missing', None, 'print patches not in series')] + seriesopts,
2292 [('m', 'missing', None, 'print patches not in series')] + seriesopts,
2293 'hg qseries [-ms]'),
2293 'hg qseries [-ms]'),
2294 "^strip":
2294 "^strip":
2295 (strip,
2295 (strip,
2296 [('f', 'force', None, 'force multi-head removal'),
2296 [('f', 'force', None, 'force multi-head removal'),
2297 ('b', 'backup', None, 'bundle unrelated changesets'),
2297 ('b', 'backup', None, 'bundle unrelated changesets'),
2298 ('n', 'nobackup', None, 'no backups')],
2298 ('n', 'nobackup', None, 'no backups')],
2299 'hg strip [-f] [-b] [-n] REV'),
2299 'hg strip [-f] [-b] [-n] REV'),
2300 "qtop": (top, [] + seriesopts, 'hg qtop [-s]'),
2300 "qtop": (top, [] + seriesopts, 'hg qtop [-s]'),
2301 "qunapplied": (unapplied, [] + seriesopts, 'hg qunapplied [-s] [PATCH]'),
2301 "qunapplied": (unapplied, [] + seriesopts, 'hg qunapplied [-s] [PATCH]'),
2302 }
2302 }
@@ -1,481 +1,481 b''
1 """
1 """
2 dirstate.py - working directory tracking for mercurial
2 dirstate.py - working directory tracking for mercurial
3
3
4 Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
5
5
6 This software may be used and distributed according to the terms
6 This software may be used and distributed according to the terms
7 of the GNU General Public License, incorporated herein by reference.
7 of the GNU General Public License, incorporated herein by reference.
8 """
8 """
9
9
10 from node import *
10 from node import *
11 from i18n import _
11 from i18n import _
12 import struct, os, time, bisect, stat, strutil, util, re, errno, ignore
12 import struct, os, time, bisect, stat, strutil, util, re, errno, ignore
13 import cStringIO
13 import cStringIO
14
14
15 _unknown = ('?', 0, 0, 0)
15 _unknown = ('?', 0, 0, 0)
16 _format = ">cllll"
16 _format = ">cllll"
17
17
18 class dirstate(object):
18 class dirstate(object):
19
19
20 def __init__(self, opener, ui, root):
20 def __init__(self, opener, ui, root):
21 self.opener = opener
21 self.opener = opener
22 self.root = root
22 self.root = root
23 self._dirty = 0
23 self._dirty = 0
24 self.ui = ui
24 self.ui = ui
25
25
26 def __getattr__(self, name):
26 def __getattr__(self, name):
27 if name == 'map':
27 if name == 'map':
28 self.read()
28 self.read()
29 return self.map
29 return self.map
30 elif name == 'copymap':
30 elif name == 'copymap':
31 self.read()
31 self.read()
32 return self.copymap
32 return self.copymap
33 elif name == '_branch':
33 elif name == '_branch':
34 try:
34 try:
35 self._branch = self.opener("branch").read().strip()\
35 self._branch = self.opener("branch").read().strip()\
36 or "default"
36 or "default"
37 except IOError:
37 except IOError:
38 self._branch = "default"
38 self._branch = "default"
39 return self._branch
39 return self._branch
40 elif name == 'pl':
40 elif name == 'pl':
41 self.pl = [nullid, nullid]
41 self.pl = [nullid, nullid]
42 try:
42 try:
43 st = self.opener("dirstate").read(40)
43 st = self.opener("dirstate").read(40)
44 if len(st) == 40:
44 if len(st) == 40:
45 self.pl = st[:20], st[20:40]
45 self.pl = st[:20], st[20:40]
46 except IOError, err:
46 except IOError, err:
47 if err.errno != errno.ENOENT: raise
47 if err.errno != errno.ENOENT: raise
48 return self.pl
48 return self.pl
49 elif name == 'dirs':
49 elif name == 'dirs':
50 self.dirs = {}
50 self.dirs = {}
51 for f in self.map:
51 for f in self.map:
52 self.updatedirs(f, 1)
52 self.updatedirs(f, 1)
53 return self.dirs
53 return self.dirs
54 elif name == '_ignore':
54 elif name == '_ignore':
55 files = [self.wjoin('.hgignore')] + self.ui.hgignorefiles()
55 files = [self.wjoin('.hgignore')] + self.ui.hgignorefiles()
56 self._ignore = ignore.ignore(self.root, files, self.ui.warn)
56 self._ignore = ignore.ignore(self.root, files, self.ui.warn)
57 return self._ignore
57 return self._ignore
58 elif name == '_slash':
58 elif name == '_slash':
59 self._slash = self.ui.configbool('ui', 'slash') and os.sep != '/'
59 self._slash = self.ui.configbool('ui', 'slash') and os.sep != '/'
60 return self._slash
60 return self._slash
61 else:
61 else:
62 raise AttributeError, name
62 raise AttributeError, name
63
63
64 def wjoin(self, f):
64 def wjoin(self, f):
65 return os.path.join(self.root, f)
65 return os.path.join(self.root, f)
66
66
67 def getcwd(self):
67 def getcwd(self):
68 cwd = os.getcwd()
68 cwd = os.getcwd()
69 if cwd == self.root: return ''
69 if cwd == self.root: return ''
70 # self.root ends with a path separator if self.root is '/' or 'C:\'
70 # self.root ends with a path separator if self.root is '/' or 'C:\'
71 rootsep = self.root
71 rootsep = self.root
72 if not rootsep.endswith(os.sep):
72 if not rootsep.endswith(os.sep):
73 rootsep += os.sep
73 rootsep += os.sep
74 if cwd.startswith(rootsep):
74 if cwd.startswith(rootsep):
75 return cwd[len(rootsep):]
75 return cwd[len(rootsep):]
76 else:
76 else:
77 # we're outside the repo. return an absolute path.
77 # we're outside the repo. return an absolute path.
78 return cwd
78 return cwd
79
79
80 def pathto(self, f, cwd=None):
80 def pathto(self, f, cwd=None):
81 if cwd is None:
81 if cwd is None:
82 cwd = self.getcwd()
82 cwd = self.getcwd()
83 path = util.pathto(self.root, cwd, f)
83 path = util.pathto(self.root, cwd, f)
84 if self._slash:
84 if self._slash:
85 return path.replace(os.sep, '/')
85 return path.replace(os.sep, '/')
86 return path
86 return path
87
87
88 def __del__(self):
88 def __del__(self):
89 self.write()
89 self.write()
90
90
91 def __getitem__(self, key):
91 def __getitem__(self, key):
92 return self.map[key]
92 return self.map[key]
93
93
94 def __contains__(self, key):
94 def __contains__(self, key):
95 return key in self.map
95 return key in self.map
96
96
97 def parents(self):
97 def parents(self):
98 return self.pl
98 return self.pl
99
99
100 def branch(self):
100 def branch(self):
101 return self._branch
101 return self._branch
102
102
103 def markdirty(self):
103 def markdirty(self):
104 self._dirty = 1
104 self._dirty = 1
105
105
106 def setparents(self, p1, p2=nullid):
106 def setparents(self, p1, p2=nullid):
107 self.markdirty()
107 self.markdirty()
108 self.pl = p1, p2
108 self.pl = p1, p2
109
109
110 def setbranch(self, branch):
110 def setbranch(self, branch):
111 self._branch = branch
111 self._branch = branch
112 self.opener("branch", "w").write(branch + '\n')
112 self.opener("branch", "w").write(branch + '\n')
113
113
114 def state(self, key):
114 def state(self, key):
115 return self.map.get(key, ("?",))[0]
115 return self.map.get(key, ("?",))[0]
116
116
117 def read(self):
117 def read(self):
118 self.map = {}
118 self.map = {}
119 self.copymap = {}
119 self.copymap = {}
120 self.pl = [nullid, nullid]
120 self.pl = [nullid, nullid]
121 try:
121 try:
122 st = self.opener("dirstate").read()
122 st = self.opener("dirstate").read()
123 except IOError, err:
123 except IOError, err:
124 if err.errno != errno.ENOENT: raise
124 if err.errno != errno.ENOENT: raise
125 return
125 return
126 if not st:
126 if not st:
127 return
127 return
128
128
129 self.pl = [st[:20], st[20: 40]]
129 self.pl = [st[:20], st[20: 40]]
130
130
131 # deref fields so they will be local in loop
131 # deref fields so they will be local in loop
132 dmap = self.map
132 dmap = self.map
133 copymap = self.copymap
133 copymap = self.copymap
134 unpack = struct.unpack
134 unpack = struct.unpack
135
135
136 pos = 40
136 pos = 40
137 e_size = struct.calcsize(_format)
137 e_size = struct.calcsize(_format)
138
138
139 while pos < len(st):
139 while pos < len(st):
140 newpos = pos + e_size
140 newpos = pos + e_size
141 e = unpack(_format, st[pos:newpos])
141 e = unpack(_format, st[pos:newpos])
142 l = e[4]
142 l = e[4]
143 pos = newpos
143 pos = newpos
144 newpos = pos + l
144 newpos = pos + l
145 f = st[pos:newpos]
145 f = st[pos:newpos]
146 if '\0' in f:
146 if '\0' in f:
147 f, c = f.split('\0')
147 f, c = f.split('\0')
148 copymap[f] = c
148 copymap[f] = c
149 dmap[f] = e[:4]
149 dmap[f] = e[:4]
150 pos = newpos
150 pos = newpos
151
151
152 def reload(self):
152 def invalidate(self):
153 for a in "map copymap _branch pl dirs _ignore".split():
153 for a in "map copymap _branch pl dirs _ignore".split():
154 if hasattr(self, a):
154 if hasattr(self, a):
155 self.__delattr__(a)
155 self.__delattr__(a)
156
156
157 def copy(self, source, dest):
157 def copy(self, source, dest):
158 self.markdirty()
158 self.markdirty()
159 self.copymap[dest] = source
159 self.copymap[dest] = source
160
160
161 def copied(self, file):
161 def copied(self, file):
162 return self.copymap.get(file, None)
162 return self.copymap.get(file, None)
163
163
164 def copies(self):
164 def copies(self):
165 return self.copymap
165 return self.copymap
166
166
167 def updatedirs(self, path, delta):
167 def updatedirs(self, path, delta):
168 for c in strutil.findall(path, '/'):
168 for c in strutil.findall(path, '/'):
169 pc = path[:c]
169 pc = path[:c]
170 self.dirs.setdefault(pc, 0)
170 self.dirs.setdefault(pc, 0)
171 self.dirs[pc] += delta
171 self.dirs[pc] += delta
172
172
173 def checkinterfering(self, files):
173 def checkinterfering(self, files):
174 def prefixes(f):
174 def prefixes(f):
175 for c in strutil.rfindall(f, '/'):
175 for c in strutil.rfindall(f, '/'):
176 yield f[:c]
176 yield f[:c]
177 seendirs = {}
177 seendirs = {}
178 for f in files:
178 for f in files:
179 # shadows
179 # shadows
180 if self.dirs.get(f):
180 if self.dirs.get(f):
181 raise util.Abort(_('directory named %r already in dirstate') %
181 raise util.Abort(_('directory named %r already in dirstate') %
182 f)
182 f)
183 for d in prefixes(f):
183 for d in prefixes(f):
184 if d in seendirs:
184 if d in seendirs:
185 break
185 break
186 if d in self.map:
186 if d in self.map:
187 raise util.Abort(_('file named %r already in dirstate') %
187 raise util.Abort(_('file named %r already in dirstate') %
188 d)
188 d)
189 seendirs[d] = True
189 seendirs[d] = True
190 # disallowed
190 # disallowed
191 if '\r' in f or '\n' in f:
191 if '\r' in f or '\n' in f:
192 raise util.Abort(_("'\\n' and '\\r' disallowed in filenames"))
192 raise util.Abort(_("'\\n' and '\\r' disallowed in filenames"))
193
193
194 def update(self, files, state, **kw):
194 def update(self, files, state, **kw):
195 ''' current states:
195 ''' current states:
196 n normal
196 n normal
197 m needs merging
197 m needs merging
198 r marked for removal
198 r marked for removal
199 a marked for addition'''
199 a marked for addition'''
200
200
201 if not files: return
201 if not files: return
202 self.markdirty()
202 self.markdirty()
203 if state == "a":
203 if state == "a":
204 self.checkinterfering(files)
204 self.checkinterfering(files)
205 for f in files:
205 for f in files:
206 if state == "r":
206 if state == "r":
207 self.map[f] = ('r', 0, 0, 0)
207 self.map[f] = ('r', 0, 0, 0)
208 self.updatedirs(f, -1)
208 self.updatedirs(f, -1)
209 else:
209 else:
210 if state == "a":
210 if state == "a":
211 self.updatedirs(f, 1)
211 self.updatedirs(f, 1)
212 s = os.lstat(self.wjoin(f))
212 s = os.lstat(self.wjoin(f))
213 st_size = kw.get('st_size', s.st_size)
213 st_size = kw.get('st_size', s.st_size)
214 st_mtime = kw.get('st_mtime', s.st_mtime)
214 st_mtime = kw.get('st_mtime', s.st_mtime)
215 self.map[f] = (state, s.st_mode, st_size, st_mtime)
215 self.map[f] = (state, s.st_mode, st_size, st_mtime)
216 if self.copymap.has_key(f):
216 if self.copymap.has_key(f):
217 del self.copymap[f]
217 del self.copymap[f]
218
218
219 def forget(self, files):
219 def forget(self, files):
220 if not files: return
220 if not files: return
221 self.markdirty()
221 self.markdirty()
222 for f in files:
222 for f in files:
223 try:
223 try:
224 del self.map[f]
224 del self.map[f]
225 self.updatedirs(f, -1)
225 self.updatedirs(f, -1)
226 except KeyError:
226 except KeyError:
227 self.ui.warn(_("not in dirstate: %s!\n") % f)
227 self.ui.warn(_("not in dirstate: %s!\n") % f)
228 pass
228 pass
229
229
230 def rebuild(self, parent, files):
230 def rebuild(self, parent, files):
231 self.reload()
231 self.invalidate()
232 for f in files:
232 for f in files:
233 if files.execf(f):
233 if files.execf(f):
234 self.map[f] = ('n', 0777, -1, 0)
234 self.map[f] = ('n', 0777, -1, 0)
235 else:
235 else:
236 self.map[f] = ('n', 0666, -1, 0)
236 self.map[f] = ('n', 0666, -1, 0)
237 self.pl = (parent, nullid)
237 self.pl = (parent, nullid)
238 self.markdirty()
238 self.markdirty()
239
239
240 def write(self):
240 def write(self):
241 if not self._dirty:
241 if not self._dirty:
242 return
242 return
243 cs = cStringIO.StringIO()
243 cs = cStringIO.StringIO()
244 cs.write("".join(self.pl))
244 cs.write("".join(self.pl))
245 for f, e in self.map.iteritems():
245 for f, e in self.map.iteritems():
246 c = self.copied(f)
246 c = self.copied(f)
247 if c:
247 if c:
248 f = f + "\0" + c
248 f = f + "\0" + c
249 e = struct.pack(_format, e[0], e[1], e[2], e[3], len(f))
249 e = struct.pack(_format, e[0], e[1], e[2], e[3], len(f))
250 cs.write(e)
250 cs.write(e)
251 cs.write(f)
251 cs.write(f)
252 st = self.opener("dirstate", "w", atomictemp=True)
252 st = self.opener("dirstate", "w", atomictemp=True)
253 st.write(cs.getvalue())
253 st.write(cs.getvalue())
254 st.rename()
254 st.rename()
255 self._dirty = 0
255 self._dirty = 0
256
256
257 def filterfiles(self, files):
257 def filterfiles(self, files):
258 ret = {}
258 ret = {}
259 unknown = []
259 unknown = []
260
260
261 for x in files:
261 for x in files:
262 if x == '.':
262 if x == '.':
263 return self.map.copy()
263 return self.map.copy()
264 if x not in self.map:
264 if x not in self.map:
265 unknown.append(x)
265 unknown.append(x)
266 else:
266 else:
267 ret[x] = self.map[x]
267 ret[x] = self.map[x]
268
268
269 if not unknown:
269 if not unknown:
270 return ret
270 return ret
271
271
272 b = self.map.keys()
272 b = self.map.keys()
273 b.sort()
273 b.sort()
274 blen = len(b)
274 blen = len(b)
275
275
276 for x in unknown:
276 for x in unknown:
277 bs = bisect.bisect(b, "%s%s" % (x, '/'))
277 bs = bisect.bisect(b, "%s%s" % (x, '/'))
278 while bs < blen:
278 while bs < blen:
279 s = b[bs]
279 s = b[bs]
280 if len(s) > len(x) and s.startswith(x):
280 if len(s) > len(x) and s.startswith(x):
281 ret[s] = self.map[s]
281 ret[s] = self.map[s]
282 else:
282 else:
283 break
283 break
284 bs += 1
284 bs += 1
285 return ret
285 return ret
286
286
287 def supported_type(self, f, st, verbose=False):
287 def supported_type(self, f, st, verbose=False):
288 if stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode):
288 if stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode):
289 return True
289 return True
290 if verbose:
290 if verbose:
291 kind = 'unknown'
291 kind = 'unknown'
292 if stat.S_ISCHR(st.st_mode): kind = _('character device')
292 if stat.S_ISCHR(st.st_mode): kind = _('character device')
293 elif stat.S_ISBLK(st.st_mode): kind = _('block device')
293 elif stat.S_ISBLK(st.st_mode): kind = _('block device')
294 elif stat.S_ISFIFO(st.st_mode): kind = _('fifo')
294 elif stat.S_ISFIFO(st.st_mode): kind = _('fifo')
295 elif stat.S_ISSOCK(st.st_mode): kind = _('socket')
295 elif stat.S_ISSOCK(st.st_mode): kind = _('socket')
296 elif stat.S_ISDIR(st.st_mode): kind = _('directory')
296 elif stat.S_ISDIR(st.st_mode): kind = _('directory')
297 self.ui.warn(_('%s: unsupported file type (type is %s)\n')
297 self.ui.warn(_('%s: unsupported file type (type is %s)\n')
298 % (self.pathto(f), kind))
298 % (self.pathto(f), kind))
299 return False
299 return False
300
300
301 def walk(self, files=None, match=util.always, badmatch=None):
301 def walk(self, files=None, match=util.always, badmatch=None):
302 # filter out the stat
302 # filter out the stat
303 for src, f, st in self.statwalk(files, match, badmatch=badmatch):
303 for src, f, st in self.statwalk(files, match, badmatch=badmatch):
304 yield src, f
304 yield src, f
305
305
306 def statwalk(self, files=None, match=util.always, ignored=False,
306 def statwalk(self, files=None, match=util.always, ignored=False,
307 badmatch=None, directories=False):
307 badmatch=None, directories=False):
308 '''
308 '''
309 walk recursively through the directory tree, finding all files
309 walk recursively through the directory tree, finding all files
310 matched by the match function
310 matched by the match function
311
311
312 results are yielded in a tuple (src, filename, st), where src
312 results are yielded in a tuple (src, filename, st), where src
313 is one of:
313 is one of:
314 'f' the file was found in the directory tree
314 'f' the file was found in the directory tree
315 'd' the file is a directory of the tree
315 'd' the file is a directory of the tree
316 'm' the file was only in the dirstate and not in the tree
316 'm' the file was only in the dirstate and not in the tree
317 'b' file was not found and matched badmatch
317 'b' file was not found and matched badmatch
318
318
319 and st is the stat result if the file was found in the directory.
319 and st is the stat result if the file was found in the directory.
320 '''
320 '''
321
321
322 # walk all files by default
322 # walk all files by default
323 if not files:
323 if not files:
324 files = ['.']
324 files = ['.']
325 dc = self.map.copy()
325 dc = self.map.copy()
326 else:
326 else:
327 files = util.unique(files)
327 files = util.unique(files)
328 dc = self.filterfiles(files)
328 dc = self.filterfiles(files)
329
329
330 def imatch(file_):
330 def imatch(file_):
331 if file_ not in dc and self._ignore(file_):
331 if file_ not in dc and self._ignore(file_):
332 return False
332 return False
333 return match(file_)
333 return match(file_)
334
334
335 ignore = self._ignore
335 ignore = self._ignore
336 if ignored:
336 if ignored:
337 imatch = match
337 imatch = match
338 ignore = util.never
338 ignore = util.never
339
339
340 # self.root may end with a path separator when self.root == '/'
340 # self.root may end with a path separator when self.root == '/'
341 common_prefix_len = len(self.root)
341 common_prefix_len = len(self.root)
342 if not self.root.endswith(os.sep):
342 if not self.root.endswith(os.sep):
343 common_prefix_len += 1
343 common_prefix_len += 1
344 # recursion free walker, faster than os.walk.
344 # recursion free walker, faster than os.walk.
345 def findfiles(s):
345 def findfiles(s):
346 work = [s]
346 work = [s]
347 if directories:
347 if directories:
348 yield 'd', util.normpath(s[common_prefix_len:]), os.lstat(s)
348 yield 'd', util.normpath(s[common_prefix_len:]), os.lstat(s)
349 while work:
349 while work:
350 top = work.pop()
350 top = work.pop()
351 names = os.listdir(top)
351 names = os.listdir(top)
352 names.sort()
352 names.sort()
353 # nd is the top of the repository dir tree
353 # nd is the top of the repository dir tree
354 nd = util.normpath(top[common_prefix_len:])
354 nd = util.normpath(top[common_prefix_len:])
355 if nd == '.':
355 if nd == '.':
356 nd = ''
356 nd = ''
357 else:
357 else:
358 # do not recurse into a repo contained in this
358 # do not recurse into a repo contained in this
359 # one. use bisect to find .hg directory so speed
359 # one. use bisect to find .hg directory so speed
360 # is good on big directory.
360 # is good on big directory.
361 hg = bisect.bisect_left(names, '.hg')
361 hg = bisect.bisect_left(names, '.hg')
362 if hg < len(names) and names[hg] == '.hg':
362 if hg < len(names) and names[hg] == '.hg':
363 if os.path.isdir(os.path.join(top, '.hg')):
363 if os.path.isdir(os.path.join(top, '.hg')):
364 continue
364 continue
365 for f in names:
365 for f in names:
366 np = util.pconvert(os.path.join(nd, f))
366 np = util.pconvert(os.path.join(nd, f))
367 if seen(np):
367 if seen(np):
368 continue
368 continue
369 p = os.path.join(top, f)
369 p = os.path.join(top, f)
370 # don't trip over symlinks
370 # don't trip over symlinks
371 st = os.lstat(p)
371 st = os.lstat(p)
372 if stat.S_ISDIR(st.st_mode):
372 if stat.S_ISDIR(st.st_mode):
373 if not ignore(np):
373 if not ignore(np):
374 work.append(p)
374 work.append(p)
375 if directories:
375 if directories:
376 yield 'd', np, st
376 yield 'd', np, st
377 if imatch(np) and np in dc:
377 if imatch(np) and np in dc:
378 yield 'm', np, st
378 yield 'm', np, st
379 elif imatch(np):
379 elif imatch(np):
380 if self.supported_type(np, st):
380 if self.supported_type(np, st):
381 yield 'f', np, st
381 yield 'f', np, st
382 elif np in dc:
382 elif np in dc:
383 yield 'm', np, st
383 yield 'm', np, st
384
384
385 known = {'.hg': 1}
385 known = {'.hg': 1}
386 def seen(fn):
386 def seen(fn):
387 if fn in known: return True
387 if fn in known: return True
388 known[fn] = 1
388 known[fn] = 1
389
389
390 # step one, find all files that match our criteria
390 # step one, find all files that match our criteria
391 files.sort()
391 files.sort()
392 for ff in files:
392 for ff in files:
393 nf = util.normpath(ff)
393 nf = util.normpath(ff)
394 f = self.wjoin(ff)
394 f = self.wjoin(ff)
395 try:
395 try:
396 st = os.lstat(f)
396 st = os.lstat(f)
397 except OSError, inst:
397 except OSError, inst:
398 found = False
398 found = False
399 for fn in dc:
399 for fn in dc:
400 if nf == fn or (fn.startswith(nf) and fn[len(nf)] == '/'):
400 if nf == fn or (fn.startswith(nf) and fn[len(nf)] == '/'):
401 found = True
401 found = True
402 break
402 break
403 if not found:
403 if not found:
404 if inst.errno != errno.ENOENT or not badmatch:
404 if inst.errno != errno.ENOENT or not badmatch:
405 self.ui.warn('%s: %s\n' % (self.pathto(ff),
405 self.ui.warn('%s: %s\n' % (self.pathto(ff),
406 inst.strerror))
406 inst.strerror))
407 elif badmatch and badmatch(ff) and imatch(nf):
407 elif badmatch and badmatch(ff) and imatch(nf):
408 yield 'b', ff, None
408 yield 'b', ff, None
409 continue
409 continue
410 if stat.S_ISDIR(st.st_mode):
410 if stat.S_ISDIR(st.st_mode):
411 cmp1 = (lambda x, y: cmp(x[1], y[1]))
411 cmp1 = (lambda x, y: cmp(x[1], y[1]))
412 sorted_ = [ x for x in findfiles(f) ]
412 sorted_ = [ x for x in findfiles(f) ]
413 sorted_.sort(cmp1)
413 sorted_.sort(cmp1)
414 for e in sorted_:
414 for e in sorted_:
415 yield e
415 yield e
416 else:
416 else:
417 if not seen(nf) and match(nf):
417 if not seen(nf) and match(nf):
418 if self.supported_type(ff, st, verbose=True):
418 if self.supported_type(ff, st, verbose=True):
419 yield 'f', nf, st
419 yield 'f', nf, st
420 elif ff in dc:
420 elif ff in dc:
421 yield 'm', nf, st
421 yield 'm', nf, st
422
422
423 # step two run through anything left in the dc hash and yield
423 # step two run through anything left in the dc hash and yield
424 # if we haven't already seen it
424 # if we haven't already seen it
425 ks = dc.keys()
425 ks = dc.keys()
426 ks.sort()
426 ks.sort()
427 for k in ks:
427 for k in ks:
428 if not seen(k) and imatch(k):
428 if not seen(k) and imatch(k):
429 yield 'm', k, None
429 yield 'm', k, None
430
430
431 def status(self, files=None, match=util.always, list_ignored=False,
431 def status(self, files=None, match=util.always, list_ignored=False,
432 list_clean=False):
432 list_clean=False):
433 lookup, modified, added, unknown, ignored = [], [], [], [], []
433 lookup, modified, added, unknown, ignored = [], [], [], [], []
434 removed, deleted, clean = [], [], []
434 removed, deleted, clean = [], [], []
435
435
436 for src, fn, st in self.statwalk(files, match, ignored=list_ignored):
436 for src, fn, st in self.statwalk(files, match, ignored=list_ignored):
437 try:
437 try:
438 type_, mode, size, time = self[fn]
438 type_, mode, size, time = self[fn]
439 except KeyError:
439 except KeyError:
440 if list_ignored and self._ignore(fn):
440 if list_ignored and self._ignore(fn):
441 ignored.append(fn)
441 ignored.append(fn)
442 else:
442 else:
443 unknown.append(fn)
443 unknown.append(fn)
444 continue
444 continue
445 if src == 'm':
445 if src == 'm':
446 nonexistent = True
446 nonexistent = True
447 if not st:
447 if not st:
448 try:
448 try:
449 st = os.lstat(self.wjoin(fn))
449 st = os.lstat(self.wjoin(fn))
450 except OSError, inst:
450 except OSError, inst:
451 if inst.errno != errno.ENOENT:
451 if inst.errno != errno.ENOENT:
452 raise
452 raise
453 st = None
453 st = None
454 # We need to re-check that it is a valid file
454 # We need to re-check that it is a valid file
455 if st and self.supported_type(fn, st):
455 if st and self.supported_type(fn, st):
456 nonexistent = False
456 nonexistent = False
457 # XXX: what to do with file no longer present in the fs
457 # XXX: what to do with file no longer present in the fs
458 # who are not removed in the dirstate ?
458 # who are not removed in the dirstate ?
459 if nonexistent and type_ in "nm":
459 if nonexistent and type_ in "nm":
460 deleted.append(fn)
460 deleted.append(fn)
461 continue
461 continue
462 # check the common case first
462 # check the common case first
463 if type_ == 'n':
463 if type_ == 'n':
464 if not st:
464 if not st:
465 st = os.lstat(self.wjoin(fn))
465 st = os.lstat(self.wjoin(fn))
466 if size >= 0 and (size != st.st_size
466 if size >= 0 and (size != st.st_size
467 or (mode ^ st.st_mode) & 0100):
467 or (mode ^ st.st_mode) & 0100):
468 modified.append(fn)
468 modified.append(fn)
469 elif time != int(st.st_mtime):
469 elif time != int(st.st_mtime):
470 lookup.append(fn)
470 lookup.append(fn)
471 elif list_clean:
471 elif list_clean:
472 clean.append(fn)
472 clean.append(fn)
473 elif type_ == 'm':
473 elif type_ == 'm':
474 modified.append(fn)
474 modified.append(fn)
475 elif type_ == 'a':
475 elif type_ == 'a':
476 added.append(fn)
476 added.append(fn)
477 elif type_ == 'r':
477 elif type_ == 'r':
478 removed.append(fn)
478 removed.append(fn)
479
479
480 return (lookup, modified, added, removed, deleted, unknown, ignored,
480 return (lookup, modified, added, removed, deleted, unknown, ignored,
481 clean)
481 clean)
@@ -1,1969 +1,1968 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import _
9 from i18n import _
10 import repo, changegroup
10 import repo, changegroup
11 import changelog, dirstate, filelog, manifest, context
11 import changelog, dirstate, filelog, manifest, context
12 import re, lock, transaction, tempfile, stat, mdiff, errno, ui
12 import re, lock, transaction, tempfile, stat, mdiff, errno, ui
13 import os, revlog, time, util
13 import os, revlog, time, util
14
14
15 class localrepository(repo.repository):
15 class localrepository(repo.repository):
16 capabilities = ('lookup', 'changegroupsubset')
16 capabilities = ('lookup', 'changegroupsubset')
17 supported = ('revlogv1', 'store')
17 supported = ('revlogv1', 'store')
18
18
19 def __del__(self):
19 def __del__(self):
20 self.transhandle = None
20 self.transhandle = None
21 def __init__(self, parentui, path=None, create=0):
21 def __init__(self, parentui, path=None, create=0):
22 repo.repository.__init__(self)
22 repo.repository.__init__(self)
23 self.path = path
23 self.path = path
24 self.root = os.path.realpath(path)
24 self.root = os.path.realpath(path)
25 self.path = os.path.join(self.root, ".hg")
25 self.path = os.path.join(self.root, ".hg")
26 self.origroot = path
26 self.origroot = path
27 self.opener = util.opener(self.path)
27 self.opener = util.opener(self.path)
28 self.wopener = util.opener(self.root)
28 self.wopener = util.opener(self.root)
29
29
30 if not os.path.isdir(self.path):
30 if not os.path.isdir(self.path):
31 if create:
31 if create:
32 if not os.path.exists(path):
32 if not os.path.exists(path):
33 os.mkdir(path)
33 os.mkdir(path)
34 os.mkdir(self.path)
34 os.mkdir(self.path)
35 requirements = ["revlogv1"]
35 requirements = ["revlogv1"]
36 if parentui.configbool('format', 'usestore', True):
36 if parentui.configbool('format', 'usestore', True):
37 os.mkdir(os.path.join(self.path, "store"))
37 os.mkdir(os.path.join(self.path, "store"))
38 requirements.append("store")
38 requirements.append("store")
39 # create an invalid changelog
39 # create an invalid changelog
40 self.opener("00changelog.i", "a").write(
40 self.opener("00changelog.i", "a").write(
41 '\0\0\0\2' # represents revlogv2
41 '\0\0\0\2' # represents revlogv2
42 ' dummy changelog to prevent using the old repo layout'
42 ' dummy changelog to prevent using the old repo layout'
43 )
43 )
44 reqfile = self.opener("requires", "w")
44 reqfile = self.opener("requires", "w")
45 for r in requirements:
45 for r in requirements:
46 reqfile.write("%s\n" % r)
46 reqfile.write("%s\n" % r)
47 reqfile.close()
47 reqfile.close()
48 else:
48 else:
49 raise repo.RepoError(_("repository %s not found") % path)
49 raise repo.RepoError(_("repository %s not found") % path)
50 elif create:
50 elif create:
51 raise repo.RepoError(_("repository %s already exists") % path)
51 raise repo.RepoError(_("repository %s already exists") % path)
52 else:
52 else:
53 # find requirements
53 # find requirements
54 try:
54 try:
55 requirements = self.opener("requires").read().splitlines()
55 requirements = self.opener("requires").read().splitlines()
56 except IOError, inst:
56 except IOError, inst:
57 if inst.errno != errno.ENOENT:
57 if inst.errno != errno.ENOENT:
58 raise
58 raise
59 requirements = []
59 requirements = []
60 # check them
60 # check them
61 for r in requirements:
61 for r in requirements:
62 if r not in self.supported:
62 if r not in self.supported:
63 raise repo.RepoError(_("requirement '%s' not supported") % r)
63 raise repo.RepoError(_("requirement '%s' not supported") % r)
64
64
65 # setup store
65 # setup store
66 if "store" in requirements:
66 if "store" in requirements:
67 self.encodefn = util.encodefilename
67 self.encodefn = util.encodefilename
68 self.decodefn = util.decodefilename
68 self.decodefn = util.decodefilename
69 self.spath = os.path.join(self.path, "store")
69 self.spath = os.path.join(self.path, "store")
70 else:
70 else:
71 self.encodefn = lambda x: x
71 self.encodefn = lambda x: x
72 self.decodefn = lambda x: x
72 self.decodefn = lambda x: x
73 self.spath = self.path
73 self.spath = self.path
74 self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)
74 self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)
75
75
76 self.ui = ui.ui(parentui=parentui)
76 self.ui = ui.ui(parentui=parentui)
77 try:
77 try:
78 self.ui.readconfig(self.join("hgrc"), self.root)
78 self.ui.readconfig(self.join("hgrc"), self.root)
79 except IOError:
79 except IOError:
80 pass
80 pass
81
81
82 fallback = self.ui.config('ui', 'fallbackencoding')
82 fallback = self.ui.config('ui', 'fallbackencoding')
83 if fallback:
83 if fallback:
84 util._fallbackencoding = fallback
84 util._fallbackencoding = fallback
85
85
86 self.tagscache = None
86 self.tagscache = None
87 self.branchcache = None
87 self.branchcache = None
88 self.nodetagscache = None
88 self.nodetagscache = None
89 self.filterpats = {}
89 self.filterpats = {}
90 self.transhandle = None
90 self.transhandle = None
91
91
92 def __getattr__(self, name):
92 def __getattr__(self, name):
93 if name == 'changelog':
93 if name == 'changelog':
94 self.changelog = changelog.changelog(self.sopener)
94 self.changelog = changelog.changelog(self.sopener)
95 self.sopener.defversion = self.changelog.version
95 self.sopener.defversion = self.changelog.version
96 return self.changelog
96 return self.changelog
97 if name == 'manifest':
97 if name == 'manifest':
98 self.changelog
98 self.changelog
99 self.manifest = manifest.manifest(self.sopener)
99 self.manifest = manifest.manifest(self.sopener)
100 return self.manifest
100 return self.manifest
101 if name == 'dirstate':
101 if name == 'dirstate':
102 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
102 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
103 return self.dirstate
103 return self.dirstate
104 else:
104 else:
105 raise AttributeError, name
105 raise AttributeError, name
106
106
107 def url(self):
107 def url(self):
108 return 'file:' + self.root
108 return 'file:' + self.root
109
109
110 def hook(self, name, throw=False, **args):
110 def hook(self, name, throw=False, **args):
111 def callhook(hname, funcname):
111 def callhook(hname, funcname):
112 '''call python hook. hook is callable object, looked up as
112 '''call python hook. hook is callable object, looked up as
113 name in python module. if callable returns "true", hook
113 name in python module. if callable returns "true", hook
114 fails, else passes. if hook raises exception, treated as
114 fails, else passes. if hook raises exception, treated as
115 hook failure. exception propagates if throw is "true".
115 hook failure. exception propagates if throw is "true".
116
116
117 reason for "true" meaning "hook failed" is so that
117 reason for "true" meaning "hook failed" is so that
118 unmodified commands (e.g. mercurial.commands.update) can
118 unmodified commands (e.g. mercurial.commands.update) can
119 be run as hooks without wrappers to convert return values.'''
119 be run as hooks without wrappers to convert return values.'''
120
120
121 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
121 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
122 obj = funcname
122 obj = funcname
123 if not callable(obj):
123 if not callable(obj):
124 d = funcname.rfind('.')
124 d = funcname.rfind('.')
125 if d == -1:
125 if d == -1:
126 raise util.Abort(_('%s hook is invalid ("%s" not in '
126 raise util.Abort(_('%s hook is invalid ("%s" not in '
127 'a module)') % (hname, funcname))
127 'a module)') % (hname, funcname))
128 modname = funcname[:d]
128 modname = funcname[:d]
129 try:
129 try:
130 obj = __import__(modname)
130 obj = __import__(modname)
131 except ImportError:
131 except ImportError:
132 try:
132 try:
133 # extensions are loaded with hgext_ prefix
133 # extensions are loaded with hgext_ prefix
134 obj = __import__("hgext_%s" % modname)
134 obj = __import__("hgext_%s" % modname)
135 except ImportError:
135 except ImportError:
136 raise util.Abort(_('%s hook is invalid '
136 raise util.Abort(_('%s hook is invalid '
137 '(import of "%s" failed)') %
137 '(import of "%s" failed)') %
138 (hname, modname))
138 (hname, modname))
139 try:
139 try:
140 for p in funcname.split('.')[1:]:
140 for p in funcname.split('.')[1:]:
141 obj = getattr(obj, p)
141 obj = getattr(obj, p)
142 except AttributeError, err:
142 except AttributeError, err:
143 raise util.Abort(_('%s hook is invalid '
143 raise util.Abort(_('%s hook is invalid '
144 '("%s" is not defined)') %
144 '("%s" is not defined)') %
145 (hname, funcname))
145 (hname, funcname))
146 if not callable(obj):
146 if not callable(obj):
147 raise util.Abort(_('%s hook is invalid '
147 raise util.Abort(_('%s hook is invalid '
148 '("%s" is not callable)') %
148 '("%s" is not callable)') %
149 (hname, funcname))
149 (hname, funcname))
150 try:
150 try:
151 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
151 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
152 except (KeyboardInterrupt, util.SignalInterrupt):
152 except (KeyboardInterrupt, util.SignalInterrupt):
153 raise
153 raise
154 except Exception, exc:
154 except Exception, exc:
155 if isinstance(exc, util.Abort):
155 if isinstance(exc, util.Abort):
156 self.ui.warn(_('error: %s hook failed: %s\n') %
156 self.ui.warn(_('error: %s hook failed: %s\n') %
157 (hname, exc.args[0]))
157 (hname, exc.args[0]))
158 else:
158 else:
159 self.ui.warn(_('error: %s hook raised an exception: '
159 self.ui.warn(_('error: %s hook raised an exception: '
160 '%s\n') % (hname, exc))
160 '%s\n') % (hname, exc))
161 if throw:
161 if throw:
162 raise
162 raise
163 self.ui.print_exc()
163 self.ui.print_exc()
164 return True
164 return True
165 if r:
165 if r:
166 if throw:
166 if throw:
167 raise util.Abort(_('%s hook failed') % hname)
167 raise util.Abort(_('%s hook failed') % hname)
168 self.ui.warn(_('warning: %s hook failed\n') % hname)
168 self.ui.warn(_('warning: %s hook failed\n') % hname)
169 return r
169 return r
170
170
171 def runhook(name, cmd):
171 def runhook(name, cmd):
172 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
172 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
173 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
173 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
174 r = util.system(cmd, environ=env, cwd=self.root)
174 r = util.system(cmd, environ=env, cwd=self.root)
175 if r:
175 if r:
176 desc, r = util.explain_exit(r)
176 desc, r = util.explain_exit(r)
177 if throw:
177 if throw:
178 raise util.Abort(_('%s hook %s') % (name, desc))
178 raise util.Abort(_('%s hook %s') % (name, desc))
179 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
179 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
180 return r
180 return r
181
181
182 r = False
182 r = False
183 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
183 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
184 if hname.split(".", 1)[0] == name and cmd]
184 if hname.split(".", 1)[0] == name and cmd]
185 hooks.sort()
185 hooks.sort()
186 for hname, cmd in hooks:
186 for hname, cmd in hooks:
187 if callable(cmd):
187 if callable(cmd):
188 r = callhook(hname, cmd) or r
188 r = callhook(hname, cmd) or r
189 elif cmd.startswith('python:'):
189 elif cmd.startswith('python:'):
190 r = callhook(hname, cmd[7:].strip()) or r
190 r = callhook(hname, cmd[7:].strip()) or r
191 else:
191 else:
192 r = runhook(hname, cmd) or r
192 r = runhook(hname, cmd) or r
193 return r
193 return r
194
194
195 tag_disallowed = ':\r\n'
195 tag_disallowed = ':\r\n'
196
196
197 def _tag(self, name, node, message, local, user, date, parent=None):
197 def _tag(self, name, node, message, local, user, date, parent=None):
198 use_dirstate = parent is None
198 use_dirstate = parent is None
199
199
200 for c in self.tag_disallowed:
200 for c in self.tag_disallowed:
201 if c in name:
201 if c in name:
202 raise util.Abort(_('%r cannot be used in a tag name') % c)
202 raise util.Abort(_('%r cannot be used in a tag name') % c)
203
203
204 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
204 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
205
205
206 if local:
206 if local:
207 # local tags are stored in the current charset
207 # local tags are stored in the current charset
208 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
208 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
209 self.hook('tag', node=hex(node), tag=name, local=local)
209 self.hook('tag', node=hex(node), tag=name, local=local)
210 return
210 return
211
211
212 # committed tags are stored in UTF-8
212 # committed tags are stored in UTF-8
213 line = '%s %s\n' % (hex(node), util.fromlocal(name))
213 line = '%s %s\n' % (hex(node), util.fromlocal(name))
214 if use_dirstate:
214 if use_dirstate:
215 self.wfile('.hgtags', 'ab').write(line)
215 self.wfile('.hgtags', 'ab').write(line)
216 else:
216 else:
217 ntags = self.filectx('.hgtags', parent).data()
217 ntags = self.filectx('.hgtags', parent).data()
218 self.wfile('.hgtags', 'ab').write(ntags + line)
218 self.wfile('.hgtags', 'ab').write(ntags + line)
219 if use_dirstate and self.dirstate.state('.hgtags') == '?':
219 if use_dirstate and self.dirstate.state('.hgtags') == '?':
220 self.add(['.hgtags'])
220 self.add(['.hgtags'])
221
221
222 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent)
222 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent)
223
223
224 self.hook('tag', node=hex(node), tag=name, local=local)
224 self.hook('tag', node=hex(node), tag=name, local=local)
225
225
226 return tagnode
226 return tagnode
227
227
228 def tag(self, name, node, message, local, user, date):
228 def tag(self, name, node, message, local, user, date):
229 '''tag a revision with a symbolic name.
229 '''tag a revision with a symbolic name.
230
230
231 if local is True, the tag is stored in a per-repository file.
231 if local is True, the tag is stored in a per-repository file.
232 otherwise, it is stored in the .hgtags file, and a new
232 otherwise, it is stored in the .hgtags file, and a new
233 changeset is committed with the change.
233 changeset is committed with the change.
234
234
235 keyword arguments:
235 keyword arguments:
236
236
237 local: whether to store tag in non-version-controlled file
237 local: whether to store tag in non-version-controlled file
238 (default False)
238 (default False)
239
239
240 message: commit message to use if committing
240 message: commit message to use if committing
241
241
242 user: name of user to use if committing
242 user: name of user to use if committing
243
243
244 date: date tuple to use if committing'''
244 date: date tuple to use if committing'''
245
245
246 for x in self.status()[:5]:
246 for x in self.status()[:5]:
247 if '.hgtags' in x:
247 if '.hgtags' in x:
248 raise util.Abort(_('working copy of .hgtags is changed '
248 raise util.Abort(_('working copy of .hgtags is changed '
249 '(please commit .hgtags manually)'))
249 '(please commit .hgtags manually)'))
250
250
251
251
252 self._tag(name, node, message, local, user, date)
252 self._tag(name, node, message, local, user, date)
253
253
254 def tags(self):
254 def tags(self):
255 '''return a mapping of tag to node'''
255 '''return a mapping of tag to node'''
256 if self.tagscache:
256 if self.tagscache:
257 return self.tagscache
257 return self.tagscache
258
258
259 globaltags = {}
259 globaltags = {}
260
260
261 def readtags(lines, fn):
261 def readtags(lines, fn):
262 filetags = {}
262 filetags = {}
263 count = 0
263 count = 0
264
264
265 def warn(msg):
265 def warn(msg):
266 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
266 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
267
267
268 for l in lines:
268 for l in lines:
269 count += 1
269 count += 1
270 if not l:
270 if not l:
271 continue
271 continue
272 s = l.split(" ", 1)
272 s = l.split(" ", 1)
273 if len(s) != 2:
273 if len(s) != 2:
274 warn(_("cannot parse entry"))
274 warn(_("cannot parse entry"))
275 continue
275 continue
276 node, key = s
276 node, key = s
277 key = util.tolocal(key.strip()) # stored in UTF-8
277 key = util.tolocal(key.strip()) # stored in UTF-8
278 try:
278 try:
279 bin_n = bin(node)
279 bin_n = bin(node)
280 except TypeError:
280 except TypeError:
281 warn(_("node '%s' is not well formed") % node)
281 warn(_("node '%s' is not well formed") % node)
282 continue
282 continue
283 if bin_n not in self.changelog.nodemap:
283 if bin_n not in self.changelog.nodemap:
284 warn(_("tag '%s' refers to unknown node") % key)
284 warn(_("tag '%s' refers to unknown node") % key)
285 continue
285 continue
286
286
287 h = []
287 h = []
288 if key in filetags:
288 if key in filetags:
289 n, h = filetags[key]
289 n, h = filetags[key]
290 h.append(n)
290 h.append(n)
291 filetags[key] = (bin_n, h)
291 filetags[key] = (bin_n, h)
292
292
293 for k,nh in filetags.items():
293 for k,nh in filetags.items():
294 if k not in globaltags:
294 if k not in globaltags:
295 globaltags[k] = nh
295 globaltags[k] = nh
296 continue
296 continue
297 # we prefer the global tag if:
297 # we prefer the global tag if:
298 # it supercedes us OR
298 # it supercedes us OR
299 # mutual supercedes and it has a higher rank
299 # mutual supercedes and it has a higher rank
300 # otherwise we win because we're tip-most
300 # otherwise we win because we're tip-most
301 an, ah = nh
301 an, ah = nh
302 bn, bh = globaltags[k]
302 bn, bh = globaltags[k]
303 if bn != an and an in bh and \
303 if bn != an and an in bh and \
304 (bn not in ah or len(bh) > len(ah)):
304 (bn not in ah or len(bh) > len(ah)):
305 an = bn
305 an = bn
306 ah.extend([n for n in bh if n not in ah])
306 ah.extend([n for n in bh if n not in ah])
307 globaltags[k] = an, ah
307 globaltags[k] = an, ah
308
308
309 # read the tags file from each head, ending with the tip
309 # read the tags file from each head, ending with the tip
310 f = None
310 f = None
311 for rev, node, fnode in self._hgtagsnodes():
311 for rev, node, fnode in self._hgtagsnodes():
312 f = (f and f.filectx(fnode) or
312 f = (f and f.filectx(fnode) or
313 self.filectx('.hgtags', fileid=fnode))
313 self.filectx('.hgtags', fileid=fnode))
314 readtags(f.data().splitlines(), f)
314 readtags(f.data().splitlines(), f)
315
315
316 try:
316 try:
317 data = util.fromlocal(self.opener("localtags").read())
317 data = util.fromlocal(self.opener("localtags").read())
318 # localtags are stored in the local character set
318 # localtags are stored in the local character set
319 # while the internal tag table is stored in UTF-8
319 # while the internal tag table is stored in UTF-8
320 readtags(data.splitlines(), "localtags")
320 readtags(data.splitlines(), "localtags")
321 except IOError:
321 except IOError:
322 pass
322 pass
323
323
324 self.tagscache = {}
324 self.tagscache = {}
325 for k,nh in globaltags.items():
325 for k,nh in globaltags.items():
326 n = nh[0]
326 n = nh[0]
327 if n != nullid:
327 if n != nullid:
328 self.tagscache[k] = n
328 self.tagscache[k] = n
329 self.tagscache['tip'] = self.changelog.tip()
329 self.tagscache['tip'] = self.changelog.tip()
330
330
331 return self.tagscache
331 return self.tagscache
332
332
333 def _hgtagsnodes(self):
333 def _hgtagsnodes(self):
334 heads = self.heads()
334 heads = self.heads()
335 heads.reverse()
335 heads.reverse()
336 last = {}
336 last = {}
337 ret = []
337 ret = []
338 for node in heads:
338 for node in heads:
339 c = self.changectx(node)
339 c = self.changectx(node)
340 rev = c.rev()
340 rev = c.rev()
341 try:
341 try:
342 fnode = c.filenode('.hgtags')
342 fnode = c.filenode('.hgtags')
343 except revlog.LookupError:
343 except revlog.LookupError:
344 continue
344 continue
345 ret.append((rev, node, fnode))
345 ret.append((rev, node, fnode))
346 if fnode in last:
346 if fnode in last:
347 ret[last[fnode]] = None
347 ret[last[fnode]] = None
348 last[fnode] = len(ret) - 1
348 last[fnode] = len(ret) - 1
349 return [item for item in ret if item]
349 return [item for item in ret if item]
350
350
351 def tagslist(self):
351 def tagslist(self):
352 '''return a list of tags ordered by revision'''
352 '''return a list of tags ordered by revision'''
353 l = []
353 l = []
354 for t, n in self.tags().items():
354 for t, n in self.tags().items():
355 try:
355 try:
356 r = self.changelog.rev(n)
356 r = self.changelog.rev(n)
357 except:
357 except:
358 r = -2 # sort to the beginning of the list if unknown
358 r = -2 # sort to the beginning of the list if unknown
359 l.append((r, t, n))
359 l.append((r, t, n))
360 l.sort()
360 l.sort()
361 return [(t, n) for r, t, n in l]
361 return [(t, n) for r, t, n in l]
362
362
363 def nodetags(self, node):
363 def nodetags(self, node):
364 '''return the tags associated with a node'''
364 '''return the tags associated with a node'''
365 if not self.nodetagscache:
365 if not self.nodetagscache:
366 self.nodetagscache = {}
366 self.nodetagscache = {}
367 for t, n in self.tags().items():
367 for t, n in self.tags().items():
368 self.nodetagscache.setdefault(n, []).append(t)
368 self.nodetagscache.setdefault(n, []).append(t)
369 return self.nodetagscache.get(node, [])
369 return self.nodetagscache.get(node, [])
370
370
371 def _branchtags(self):
371 def _branchtags(self):
372 partial, last, lrev = self._readbranchcache()
372 partial, last, lrev = self._readbranchcache()
373
373
374 tiprev = self.changelog.count() - 1
374 tiprev = self.changelog.count() - 1
375 if lrev != tiprev:
375 if lrev != tiprev:
376 self._updatebranchcache(partial, lrev+1, tiprev+1)
376 self._updatebranchcache(partial, lrev+1, tiprev+1)
377 self._writebranchcache(partial, self.changelog.tip(), tiprev)
377 self._writebranchcache(partial, self.changelog.tip(), tiprev)
378
378
379 return partial
379 return partial
380
380
381 def branchtags(self):
381 def branchtags(self):
382 if self.branchcache is not None:
382 if self.branchcache is not None:
383 return self.branchcache
383 return self.branchcache
384
384
385 self.branchcache = {} # avoid recursion in changectx
385 self.branchcache = {} # avoid recursion in changectx
386 partial = self._branchtags()
386 partial = self._branchtags()
387
387
388 # the branch cache is stored on disk as UTF-8, but in the local
388 # the branch cache is stored on disk as UTF-8, but in the local
389 # charset internally
389 # charset internally
390 for k, v in partial.items():
390 for k, v in partial.items():
391 self.branchcache[util.tolocal(k)] = v
391 self.branchcache[util.tolocal(k)] = v
392 return self.branchcache
392 return self.branchcache
393
393
394 def _readbranchcache(self):
394 def _readbranchcache(self):
395 partial = {}
395 partial = {}
396 try:
396 try:
397 f = self.opener("branch.cache")
397 f = self.opener("branch.cache")
398 lines = f.read().split('\n')
398 lines = f.read().split('\n')
399 f.close()
399 f.close()
400 except (IOError, OSError):
400 except (IOError, OSError):
401 return {}, nullid, nullrev
401 return {}, nullid, nullrev
402
402
403 try:
403 try:
404 last, lrev = lines.pop(0).split(" ", 1)
404 last, lrev = lines.pop(0).split(" ", 1)
405 last, lrev = bin(last), int(lrev)
405 last, lrev = bin(last), int(lrev)
406 if not (lrev < self.changelog.count() and
406 if not (lrev < self.changelog.count() and
407 self.changelog.node(lrev) == last): # sanity check
407 self.changelog.node(lrev) == last): # sanity check
408 # invalidate the cache
408 # invalidate the cache
409 raise ValueError('Invalid branch cache: unknown tip')
409 raise ValueError('Invalid branch cache: unknown tip')
410 for l in lines:
410 for l in lines:
411 if not l: continue
411 if not l: continue
412 node, label = l.split(" ", 1)
412 node, label = l.split(" ", 1)
413 partial[label.strip()] = bin(node)
413 partial[label.strip()] = bin(node)
414 except (KeyboardInterrupt, util.SignalInterrupt):
414 except (KeyboardInterrupt, util.SignalInterrupt):
415 raise
415 raise
416 except Exception, inst:
416 except Exception, inst:
417 if self.ui.debugflag:
417 if self.ui.debugflag:
418 self.ui.warn(str(inst), '\n')
418 self.ui.warn(str(inst), '\n')
419 partial, last, lrev = {}, nullid, nullrev
419 partial, last, lrev = {}, nullid, nullrev
420 return partial, last, lrev
420 return partial, last, lrev
421
421
422 def _writebranchcache(self, branches, tip, tiprev):
422 def _writebranchcache(self, branches, tip, tiprev):
423 try:
423 try:
424 f = self.opener("branch.cache", "w", atomictemp=True)
424 f = self.opener("branch.cache", "w", atomictemp=True)
425 f.write("%s %s\n" % (hex(tip), tiprev))
425 f.write("%s %s\n" % (hex(tip), tiprev))
426 for label, node in branches.iteritems():
426 for label, node in branches.iteritems():
427 f.write("%s %s\n" % (hex(node), label))
427 f.write("%s %s\n" % (hex(node), label))
428 f.rename()
428 f.rename()
429 except (IOError, OSError):
429 except (IOError, OSError):
430 pass
430 pass
431
431
432 def _updatebranchcache(self, partial, start, end):
432 def _updatebranchcache(self, partial, start, end):
433 for r in xrange(start, end):
433 for r in xrange(start, end):
434 c = self.changectx(r)
434 c = self.changectx(r)
435 b = c.branch()
435 b = c.branch()
436 partial[b] = c.node()
436 partial[b] = c.node()
437
437
438 def lookup(self, key):
438 def lookup(self, key):
439 if key == '.':
439 if key == '.':
440 key, second = self.dirstate.parents()
440 key, second = self.dirstate.parents()
441 if key == nullid:
441 if key == nullid:
442 raise repo.RepoError(_("no revision checked out"))
442 raise repo.RepoError(_("no revision checked out"))
443 if second != nullid:
443 if second != nullid:
444 self.ui.warn(_("warning: working directory has two parents, "
444 self.ui.warn(_("warning: working directory has two parents, "
445 "tag '.' uses the first\n"))
445 "tag '.' uses the first\n"))
446 elif key == 'null':
446 elif key == 'null':
447 return nullid
447 return nullid
448 n = self.changelog._match(key)
448 n = self.changelog._match(key)
449 if n:
449 if n:
450 return n
450 return n
451 if key in self.tags():
451 if key in self.tags():
452 return self.tags()[key]
452 return self.tags()[key]
453 if key in self.branchtags():
453 if key in self.branchtags():
454 return self.branchtags()[key]
454 return self.branchtags()[key]
455 n = self.changelog._partialmatch(key)
455 n = self.changelog._partialmatch(key)
456 if n:
456 if n:
457 return n
457 return n
458 raise repo.RepoError(_("unknown revision '%s'") % key)
458 raise repo.RepoError(_("unknown revision '%s'") % key)
459
459
460 def dev(self):
460 def dev(self):
461 return os.lstat(self.path).st_dev
461 return os.lstat(self.path).st_dev
462
462
463 def local(self):
463 def local(self):
464 return True
464 return True
465
465
466 def join(self, f):
466 def join(self, f):
467 return os.path.join(self.path, f)
467 return os.path.join(self.path, f)
468
468
469 def sjoin(self, f):
469 def sjoin(self, f):
470 f = self.encodefn(f)
470 f = self.encodefn(f)
471 return os.path.join(self.spath, f)
471 return os.path.join(self.spath, f)
472
472
473 def wjoin(self, f):
473 def wjoin(self, f):
474 return os.path.join(self.root, f)
474 return os.path.join(self.root, f)
475
475
476 def file(self, f):
476 def file(self, f):
477 if f[0] == '/':
477 if f[0] == '/':
478 f = f[1:]
478 f = f[1:]
479 return filelog.filelog(self.sopener, f)
479 return filelog.filelog(self.sopener, f)
480
480
481 def changectx(self, changeid=None):
481 def changectx(self, changeid=None):
482 return context.changectx(self, changeid)
482 return context.changectx(self, changeid)
483
483
484 def workingctx(self):
484 def workingctx(self):
485 return context.workingctx(self)
485 return context.workingctx(self)
486
486
487 def parents(self, changeid=None):
487 def parents(self, changeid=None):
488 '''
488 '''
489 get list of changectxs for parents of changeid or working directory
489 get list of changectxs for parents of changeid or working directory
490 '''
490 '''
491 if changeid is None:
491 if changeid is None:
492 pl = self.dirstate.parents()
492 pl = self.dirstate.parents()
493 else:
493 else:
494 n = self.changelog.lookup(changeid)
494 n = self.changelog.lookup(changeid)
495 pl = self.changelog.parents(n)
495 pl = self.changelog.parents(n)
496 if pl[1] == nullid:
496 if pl[1] == nullid:
497 return [self.changectx(pl[0])]
497 return [self.changectx(pl[0])]
498 return [self.changectx(pl[0]), self.changectx(pl[1])]
498 return [self.changectx(pl[0]), self.changectx(pl[1])]
499
499
500 def filectx(self, path, changeid=None, fileid=None):
500 def filectx(self, path, changeid=None, fileid=None):
501 """changeid can be a changeset revision, node, or tag.
501 """changeid can be a changeset revision, node, or tag.
502 fileid can be a file revision or node."""
502 fileid can be a file revision or node."""
503 return context.filectx(self, path, changeid, fileid)
503 return context.filectx(self, path, changeid, fileid)
504
504
505 def getcwd(self):
505 def getcwd(self):
506 return self.dirstate.getcwd()
506 return self.dirstate.getcwd()
507
507
508 def pathto(self, f, cwd=None):
508 def pathto(self, f, cwd=None):
509 return self.dirstate.pathto(f, cwd)
509 return self.dirstate.pathto(f, cwd)
510
510
511 def wfile(self, f, mode='r'):
511 def wfile(self, f, mode='r'):
512 return self.wopener(f, mode)
512 return self.wopener(f, mode)
513
513
514 def _link(self, f):
514 def _link(self, f):
515 return os.path.islink(self.wjoin(f))
515 return os.path.islink(self.wjoin(f))
516
516
517 def _filter(self, filter, filename, data):
517 def _filter(self, filter, filename, data):
518 if filter not in self.filterpats:
518 if filter not in self.filterpats:
519 l = []
519 l = []
520 for pat, cmd in self.ui.configitems(filter):
520 for pat, cmd in self.ui.configitems(filter):
521 mf = util.matcher(self.root, "", [pat], [], [])[1]
521 mf = util.matcher(self.root, "", [pat], [], [])[1]
522 l.append((mf, cmd))
522 l.append((mf, cmd))
523 self.filterpats[filter] = l
523 self.filterpats[filter] = l
524
524
525 for mf, cmd in self.filterpats[filter]:
525 for mf, cmd in self.filterpats[filter]:
526 if mf(filename):
526 if mf(filename):
527 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
527 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
528 data = util.filter(data, cmd)
528 data = util.filter(data, cmd)
529 break
529 break
530
530
531 return data
531 return data
532
532
533 def wread(self, filename):
533 def wread(self, filename):
534 if self._link(filename):
534 if self._link(filename):
535 data = os.readlink(self.wjoin(filename))
535 data = os.readlink(self.wjoin(filename))
536 else:
536 else:
537 data = self.wopener(filename, 'r').read()
537 data = self.wopener(filename, 'r').read()
538 return self._filter("encode", filename, data)
538 return self._filter("encode", filename, data)
539
539
540 def wwrite(self, filename, data, flags):
540 def wwrite(self, filename, data, flags):
541 data = self._filter("decode", filename, data)
541 data = self._filter("decode", filename, data)
542 if "l" in flags:
542 if "l" in flags:
543 f = self.wjoin(filename)
543 f = self.wjoin(filename)
544 try:
544 try:
545 os.unlink(f)
545 os.unlink(f)
546 except OSError:
546 except OSError:
547 pass
547 pass
548 d = os.path.dirname(f)
548 d = os.path.dirname(f)
549 if not os.path.exists(d):
549 if not os.path.exists(d):
550 os.makedirs(d)
550 os.makedirs(d)
551 os.symlink(data, f)
551 os.symlink(data, f)
552 else:
552 else:
553 try:
553 try:
554 if self._link(filename):
554 if self._link(filename):
555 os.unlink(self.wjoin(filename))
555 os.unlink(self.wjoin(filename))
556 except OSError:
556 except OSError:
557 pass
557 pass
558 self.wopener(filename, 'w').write(data)
558 self.wopener(filename, 'w').write(data)
559 util.set_exec(self.wjoin(filename), "x" in flags)
559 util.set_exec(self.wjoin(filename), "x" in flags)
560
560
561 def wwritedata(self, filename, data):
561 def wwritedata(self, filename, data):
562 return self._filter("decode", filename, data)
562 return self._filter("decode", filename, data)
563
563
564 def transaction(self):
564 def transaction(self):
565 tr = self.transhandle
565 tr = self.transhandle
566 if tr != None and tr.running():
566 if tr != None and tr.running():
567 return tr.nest()
567 return tr.nest()
568
568
569 # save dirstate for rollback
569 # save dirstate for rollback
570 try:
570 try:
571 ds = self.opener("dirstate").read()
571 ds = self.opener("dirstate").read()
572 except IOError:
572 except IOError:
573 ds = ""
573 ds = ""
574 self.opener("journal.dirstate", "w").write(ds)
574 self.opener("journal.dirstate", "w").write(ds)
575
575
576 renames = [(self.sjoin("journal"), self.sjoin("undo")),
576 renames = [(self.sjoin("journal"), self.sjoin("undo")),
577 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
577 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
578 tr = transaction.transaction(self.ui.warn, self.sopener,
578 tr = transaction.transaction(self.ui.warn, self.sopener,
579 self.sjoin("journal"),
579 self.sjoin("journal"),
580 aftertrans(renames))
580 aftertrans(renames))
581 self.transhandle = tr
581 self.transhandle = tr
582 return tr
582 return tr
583
583
584 def recover(self):
584 def recover(self):
585 l = self.lock()
585 l = self.lock()
586 if os.path.exists(self.sjoin("journal")):
586 if os.path.exists(self.sjoin("journal")):
587 self.ui.status(_("rolling back interrupted transaction\n"))
587 self.ui.status(_("rolling back interrupted transaction\n"))
588 transaction.rollback(self.sopener, self.sjoin("journal"))
588 transaction.rollback(self.sopener, self.sjoin("journal"))
589 self.reload()
589 self.invalidate()
590 return True
590 return True
591 else:
591 else:
592 self.ui.warn(_("no interrupted transaction available\n"))
592 self.ui.warn(_("no interrupted transaction available\n"))
593 return False
593 return False
594
594
595 def rollback(self, wlock=None, lock=None):
595 def rollback(self, wlock=None, lock=None):
596 if not wlock:
596 if not wlock:
597 wlock = self.wlock()
597 wlock = self.wlock()
598 if not lock:
598 if not lock:
599 lock = self.lock()
599 lock = self.lock()
600 if os.path.exists(self.sjoin("undo")):
600 if os.path.exists(self.sjoin("undo")):
601 self.ui.status(_("rolling back last transaction\n"))
601 self.ui.status(_("rolling back last transaction\n"))
602 transaction.rollback(self.sopener, self.sjoin("undo"))
602 transaction.rollback(self.sopener, self.sjoin("undo"))
603 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
603 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
604 self.reload()
604 self.invalidate()
605 self.wreload()
605 self.dirstate.invalidate()
606 else:
606 else:
607 self.ui.warn(_("no rollback information available\n"))
607 self.ui.warn(_("no rollback information available\n"))
608
608
609 def wreload(self):
609 def invalidate(self):
610 self.dirstate.reload()
610 for a in "changelog manifest".split():
611
611 if hasattr(self, a):
612 def reload(self):
612 self.__delattr__(a)
613 self.changelog.load()
614 self.manifest.load()
615 self.tagscache = None
613 self.tagscache = None
616 self.nodetagscache = None
614 self.nodetagscache = None
617
615
618 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
616 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
619 desc=None):
617 desc=None):
620 try:
618 try:
621 l = lock.lock(lockname, 0, releasefn, desc=desc)
619 l = lock.lock(lockname, 0, releasefn, desc=desc)
622 except lock.LockHeld, inst:
620 except lock.LockHeld, inst:
623 if not wait:
621 if not wait:
624 raise
622 raise
625 self.ui.warn(_("waiting for lock on %s held by %r\n") %
623 self.ui.warn(_("waiting for lock on %s held by %r\n") %
626 (desc, inst.locker))
624 (desc, inst.locker))
627 # default to 600 seconds timeout
625 # default to 600 seconds timeout
628 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
626 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
629 releasefn, desc=desc)
627 releasefn, desc=desc)
630 if acquirefn:
628 if acquirefn:
631 acquirefn()
629 acquirefn()
632 return l
630 return l
633
631
634 def lock(self, wait=1):
632 def lock(self, wait=1):
635 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
633 return self.do_lock(self.sjoin("lock"), wait,
634 acquirefn=self.invalidate,
636 desc=_('repository %s') % self.origroot)
635 desc=_('repository %s') % self.origroot)
637
636
638 def wlock(self, wait=1):
637 def wlock(self, wait=1):
639 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
638 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
640 self.wreload,
639 self.dirstate.invalidate,
641 desc=_('working directory of %s') % self.origroot)
640 desc=_('working directory of %s') % self.origroot)
642
641
643 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
642 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
644 """
643 """
645 commit an individual file as part of a larger transaction
644 commit an individual file as part of a larger transaction
646 """
645 """
647
646
648 t = self.wread(fn)
647 t = self.wread(fn)
649 fl = self.file(fn)
648 fl = self.file(fn)
650 fp1 = manifest1.get(fn, nullid)
649 fp1 = manifest1.get(fn, nullid)
651 fp2 = manifest2.get(fn, nullid)
650 fp2 = manifest2.get(fn, nullid)
652
651
653 meta = {}
652 meta = {}
654 cp = self.dirstate.copied(fn)
653 cp = self.dirstate.copied(fn)
655 if cp:
654 if cp:
656 # Mark the new revision of this file as a copy of another
655 # Mark the new revision of this file as a copy of another
657 # file. This copy data will effectively act as a parent
656 # file. This copy data will effectively act as a parent
658 # of this new revision. If this is a merge, the first
657 # of this new revision. If this is a merge, the first
659 # parent will be the nullid (meaning "look up the copy data")
658 # parent will be the nullid (meaning "look up the copy data")
660 # and the second one will be the other parent. For example:
659 # and the second one will be the other parent. For example:
661 #
660 #
662 # 0 --- 1 --- 3 rev1 changes file foo
661 # 0 --- 1 --- 3 rev1 changes file foo
663 # \ / rev2 renames foo to bar and changes it
662 # \ / rev2 renames foo to bar and changes it
664 # \- 2 -/ rev3 should have bar with all changes and
663 # \- 2 -/ rev3 should have bar with all changes and
665 # should record that bar descends from
664 # should record that bar descends from
666 # bar in rev2 and foo in rev1
665 # bar in rev2 and foo in rev1
667 #
666 #
668 # this allows this merge to succeed:
667 # this allows this merge to succeed:
669 #
668 #
670 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
669 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
671 # \ / merging rev3 and rev4 should use bar@rev2
670 # \ / merging rev3 and rev4 should use bar@rev2
672 # \- 2 --- 4 as the merge base
671 # \- 2 --- 4 as the merge base
673 #
672 #
674 meta["copy"] = cp
673 meta["copy"] = cp
675 if not manifest2: # not a branch merge
674 if not manifest2: # not a branch merge
676 meta["copyrev"] = hex(manifest1.get(cp, nullid))
675 meta["copyrev"] = hex(manifest1.get(cp, nullid))
677 fp2 = nullid
676 fp2 = nullid
678 elif fp2 != nullid: # copied on remote side
677 elif fp2 != nullid: # copied on remote side
679 meta["copyrev"] = hex(manifest1.get(cp, nullid))
678 meta["copyrev"] = hex(manifest1.get(cp, nullid))
680 elif fp1 != nullid: # copied on local side, reversed
679 elif fp1 != nullid: # copied on local side, reversed
681 meta["copyrev"] = hex(manifest2.get(cp))
680 meta["copyrev"] = hex(manifest2.get(cp))
682 fp2 = fp1
681 fp2 = fp1
683 else: # directory rename
682 else: # directory rename
684 meta["copyrev"] = hex(manifest1.get(cp, nullid))
683 meta["copyrev"] = hex(manifest1.get(cp, nullid))
685 self.ui.debug(_(" %s: copy %s:%s\n") %
684 self.ui.debug(_(" %s: copy %s:%s\n") %
686 (fn, cp, meta["copyrev"]))
685 (fn, cp, meta["copyrev"]))
687 fp1 = nullid
686 fp1 = nullid
688 elif fp2 != nullid:
687 elif fp2 != nullid:
689 # is one parent an ancestor of the other?
688 # is one parent an ancestor of the other?
690 fpa = fl.ancestor(fp1, fp2)
689 fpa = fl.ancestor(fp1, fp2)
691 if fpa == fp1:
690 if fpa == fp1:
692 fp1, fp2 = fp2, nullid
691 fp1, fp2 = fp2, nullid
693 elif fpa == fp2:
692 elif fpa == fp2:
694 fp2 = nullid
693 fp2 = nullid
695
694
696 # is the file unmodified from the parent? report existing entry
695 # is the file unmodified from the parent? report existing entry
697 if fp2 == nullid and not fl.cmp(fp1, t):
696 if fp2 == nullid and not fl.cmp(fp1, t):
698 return fp1
697 return fp1
699
698
700 changelist.append(fn)
699 changelist.append(fn)
701 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
700 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
702
701
703 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None, extra={}):
702 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None, extra={}):
704 if p1 is None:
703 if p1 is None:
705 p1, p2 = self.dirstate.parents()
704 p1, p2 = self.dirstate.parents()
706 return self.commit(files=files, text=text, user=user, date=date,
705 return self.commit(files=files, text=text, user=user, date=date,
707 p1=p1, p2=p2, wlock=wlock, extra=extra)
706 p1=p1, p2=p2, wlock=wlock, extra=extra)
708
707
709 def commit(self, files=None, text="", user=None, date=None,
708 def commit(self, files=None, text="", user=None, date=None,
710 match=util.always, force=False, lock=None, wlock=None,
709 match=util.always, force=False, lock=None, wlock=None,
711 force_editor=False, p1=None, p2=None, extra={}):
710 force_editor=False, p1=None, p2=None, extra={}):
712
711
713 commit = []
712 commit = []
714 remove = []
713 remove = []
715 changed = []
714 changed = []
716 use_dirstate = (p1 is None) # not rawcommit
715 use_dirstate = (p1 is None) # not rawcommit
717 extra = extra.copy()
716 extra = extra.copy()
718
717
719 if use_dirstate:
718 if use_dirstate:
720 if files:
719 if files:
721 for f in files:
720 for f in files:
722 s = self.dirstate.state(f)
721 s = self.dirstate.state(f)
723 if s in 'nmai':
722 if s in 'nmai':
724 commit.append(f)
723 commit.append(f)
725 elif s == 'r':
724 elif s == 'r':
726 remove.append(f)
725 remove.append(f)
727 else:
726 else:
728 self.ui.warn(_("%s not tracked!\n") % f)
727 self.ui.warn(_("%s not tracked!\n") % f)
729 else:
728 else:
730 changes = self.status(match=match)[:5]
729 changes = self.status(match=match)[:5]
731 modified, added, removed, deleted, unknown = changes
730 modified, added, removed, deleted, unknown = changes
732 commit = modified + added
731 commit = modified + added
733 remove = removed
732 remove = removed
734 else:
733 else:
735 commit = files
734 commit = files
736
735
737 if use_dirstate:
736 if use_dirstate:
738 p1, p2 = self.dirstate.parents()
737 p1, p2 = self.dirstate.parents()
739 update_dirstate = True
738 update_dirstate = True
740 else:
739 else:
741 p1, p2 = p1, p2 or nullid
740 p1, p2 = p1, p2 or nullid
742 update_dirstate = (self.dirstate.parents()[0] == p1)
741 update_dirstate = (self.dirstate.parents()[0] == p1)
743
742
744 c1 = self.changelog.read(p1)
743 c1 = self.changelog.read(p1)
745 c2 = self.changelog.read(p2)
744 c2 = self.changelog.read(p2)
746 m1 = self.manifest.read(c1[0]).copy()
745 m1 = self.manifest.read(c1[0]).copy()
747 m2 = self.manifest.read(c2[0])
746 m2 = self.manifest.read(c2[0])
748
747
749 if use_dirstate:
748 if use_dirstate:
750 branchname = self.workingctx().branch()
749 branchname = self.workingctx().branch()
751 try:
750 try:
752 branchname = branchname.decode('UTF-8').encode('UTF-8')
751 branchname = branchname.decode('UTF-8').encode('UTF-8')
753 except UnicodeDecodeError:
752 except UnicodeDecodeError:
754 raise util.Abort(_('branch name not in UTF-8!'))
753 raise util.Abort(_('branch name not in UTF-8!'))
755 else:
754 else:
756 branchname = ""
755 branchname = ""
757
756
758 if use_dirstate:
757 if use_dirstate:
759 oldname = c1[5].get("branch") # stored in UTF-8
758 oldname = c1[5].get("branch") # stored in UTF-8
760 if not commit and not remove and not force and p2 == nullid and \
759 if not commit and not remove and not force and p2 == nullid and \
761 branchname == oldname:
760 branchname == oldname:
762 self.ui.status(_("nothing changed\n"))
761 self.ui.status(_("nothing changed\n"))
763 return None
762 return None
764
763
765 xp1 = hex(p1)
764 xp1 = hex(p1)
766 if p2 == nullid: xp2 = ''
765 if p2 == nullid: xp2 = ''
767 else: xp2 = hex(p2)
766 else: xp2 = hex(p2)
768
767
769 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
768 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
770
769
771 if not wlock:
770 if not wlock:
772 wlock = self.wlock()
771 wlock = self.wlock()
773 if not lock:
772 if not lock:
774 lock = self.lock()
773 lock = self.lock()
775 tr = self.transaction()
774 tr = self.transaction()
776
775
777 # check in files
776 # check in files
778 new = {}
777 new = {}
779 linkrev = self.changelog.count()
778 linkrev = self.changelog.count()
780 commit.sort()
779 commit.sort()
781 is_exec = util.execfunc(self.root, m1.execf)
780 is_exec = util.execfunc(self.root, m1.execf)
782 is_link = util.linkfunc(self.root, m1.linkf)
781 is_link = util.linkfunc(self.root, m1.linkf)
783 for f in commit:
782 for f in commit:
784 self.ui.note(f + "\n")
783 self.ui.note(f + "\n")
785 try:
784 try:
786 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
785 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
787 new_exec = is_exec(f)
786 new_exec = is_exec(f)
788 new_link = is_link(f)
787 new_link = is_link(f)
789 if not changed or changed[-1] != f:
788 if not changed or changed[-1] != f:
790 # mention the file in the changelog if some flag changed,
789 # mention the file in the changelog if some flag changed,
791 # even if there was no content change.
790 # even if there was no content change.
792 old_exec = m1.execf(f)
791 old_exec = m1.execf(f)
793 old_link = m1.linkf(f)
792 old_link = m1.linkf(f)
794 if old_exec != new_exec or old_link != new_link:
793 if old_exec != new_exec or old_link != new_link:
795 changed.append(f)
794 changed.append(f)
796 m1.set(f, new_exec, new_link)
795 m1.set(f, new_exec, new_link)
797 except (OSError, IOError):
796 except (OSError, IOError):
798 if use_dirstate:
797 if use_dirstate:
799 self.ui.warn(_("trouble committing %s!\n") % f)
798 self.ui.warn(_("trouble committing %s!\n") % f)
800 raise
799 raise
801 else:
800 else:
802 remove.append(f)
801 remove.append(f)
803
802
804 # update manifest
803 # update manifest
805 m1.update(new)
804 m1.update(new)
806 remove.sort()
805 remove.sort()
807 removed = []
806 removed = []
808
807
809 for f in remove:
808 for f in remove:
810 if f in m1:
809 if f in m1:
811 del m1[f]
810 del m1[f]
812 removed.append(f)
811 removed.append(f)
813 elif f in m2:
812 elif f in m2:
814 removed.append(f)
813 removed.append(f)
815 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, removed))
814 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, removed))
816
815
817 # add changeset
816 # add changeset
818 new = new.keys()
817 new = new.keys()
819 new.sort()
818 new.sort()
820
819
821 user = user or self.ui.username()
820 user = user or self.ui.username()
822 if not text or force_editor:
821 if not text or force_editor:
823 edittext = []
822 edittext = []
824 if text:
823 if text:
825 edittext.append(text)
824 edittext.append(text)
826 edittext.append("")
825 edittext.append("")
827 edittext.append("HG: user: %s" % user)
826 edittext.append("HG: user: %s" % user)
828 if p2 != nullid:
827 if p2 != nullid:
829 edittext.append("HG: branch merge")
828 edittext.append("HG: branch merge")
830 if branchname:
829 if branchname:
831 edittext.append("HG: branch %s" % util.tolocal(branchname))
830 edittext.append("HG: branch %s" % util.tolocal(branchname))
832 edittext.extend(["HG: changed %s" % f for f in changed])
831 edittext.extend(["HG: changed %s" % f for f in changed])
833 edittext.extend(["HG: removed %s" % f for f in removed])
832 edittext.extend(["HG: removed %s" % f for f in removed])
834 if not changed and not remove:
833 if not changed and not remove:
835 edittext.append("HG: no files changed")
834 edittext.append("HG: no files changed")
836 edittext.append("")
835 edittext.append("")
837 # run editor in the repository root
836 # run editor in the repository root
838 olddir = os.getcwd()
837 olddir = os.getcwd()
839 os.chdir(self.root)
838 os.chdir(self.root)
840 text = self.ui.edit("\n".join(edittext), user)
839 text = self.ui.edit("\n".join(edittext), user)
841 os.chdir(olddir)
840 os.chdir(olddir)
842
841
843 lines = [line.rstrip() for line in text.rstrip().splitlines()]
842 lines = [line.rstrip() for line in text.rstrip().splitlines()]
844 while lines and not lines[0]:
843 while lines and not lines[0]:
845 del lines[0]
844 del lines[0]
846 if not lines:
845 if not lines:
847 return None
846 return None
848 text = '\n'.join(lines)
847 text = '\n'.join(lines)
849 if branchname:
848 if branchname:
850 extra["branch"] = branchname
849 extra["branch"] = branchname
851 n = self.changelog.add(mn, changed + removed, text, tr, p1, p2,
850 n = self.changelog.add(mn, changed + removed, text, tr, p1, p2,
852 user, date, extra)
851 user, date, extra)
853 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
852 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
854 parent2=xp2)
853 parent2=xp2)
855 tr.close()
854 tr.close()
856
855
857 if self.branchcache and "branch" in extra:
856 if self.branchcache and "branch" in extra:
858 self.branchcache[util.tolocal(extra["branch"])] = n
857 self.branchcache[util.tolocal(extra["branch"])] = n
859
858
860 if use_dirstate or update_dirstate:
859 if use_dirstate or update_dirstate:
861 self.dirstate.setparents(n)
860 self.dirstate.setparents(n)
862 if use_dirstate:
861 if use_dirstate:
863 self.dirstate.update(new, "n")
862 self.dirstate.update(new, "n")
864 self.dirstate.forget(removed)
863 self.dirstate.forget(removed)
865
864
866 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
865 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
867 return n
866 return n
868
867
869 def walk(self, node=None, files=[], match=util.always, badmatch=None):
868 def walk(self, node=None, files=[], match=util.always, badmatch=None):
870 '''
869 '''
871 walk recursively through the directory tree or a given
870 walk recursively through the directory tree or a given
872 changeset, finding all files matched by the match
871 changeset, finding all files matched by the match
873 function
872 function
874
873
875 results are yielded in a tuple (src, filename), where src
874 results are yielded in a tuple (src, filename), where src
876 is one of:
875 is one of:
877 'f' the file was found in the directory tree
876 'f' the file was found in the directory tree
878 'm' the file was only in the dirstate and not in the tree
877 'm' the file was only in the dirstate and not in the tree
879 'b' file was not found and matched badmatch
878 'b' file was not found and matched badmatch
880 '''
879 '''
881
880
882 if node:
881 if node:
883 fdict = dict.fromkeys(files)
882 fdict = dict.fromkeys(files)
884 # for dirstate.walk, files=['.'] means "walk the whole tree".
883 # for dirstate.walk, files=['.'] means "walk the whole tree".
885 # follow that here, too
884 # follow that here, too
886 fdict.pop('.', None)
885 fdict.pop('.', None)
887 mdict = self.manifest.read(self.changelog.read(node)[0])
886 mdict = self.manifest.read(self.changelog.read(node)[0])
888 mfiles = mdict.keys()
887 mfiles = mdict.keys()
889 mfiles.sort()
888 mfiles.sort()
890 for fn in mfiles:
889 for fn in mfiles:
891 for ffn in fdict:
890 for ffn in fdict:
892 # match if the file is the exact name or a directory
891 # match if the file is the exact name or a directory
893 if ffn == fn or fn.startswith("%s/" % ffn):
892 if ffn == fn or fn.startswith("%s/" % ffn):
894 del fdict[ffn]
893 del fdict[ffn]
895 break
894 break
896 if match(fn):
895 if match(fn):
897 yield 'm', fn
896 yield 'm', fn
898 ffiles = fdict.keys()
897 ffiles = fdict.keys()
899 ffiles.sort()
898 ffiles.sort()
900 for fn in ffiles:
899 for fn in ffiles:
901 if badmatch and badmatch(fn):
900 if badmatch and badmatch(fn):
902 if match(fn):
901 if match(fn):
903 yield 'b', fn
902 yield 'b', fn
904 else:
903 else:
905 self.ui.warn(_('%s: No such file in rev %s\n')
904 self.ui.warn(_('%s: No such file in rev %s\n')
906 % (self.pathto(fn), short(node)))
905 % (self.pathto(fn), short(node)))
907 else:
906 else:
908 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
907 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
909 yield src, fn
908 yield src, fn
910
909
    def status(self, node1=None, node2=None, files=[], match=util.always,
               wlock=None, list_ignored=False, list_clean=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a 7-tuple of sorted lists:
        (modified, added, removed, deleted, unknown, ignored, clean).

        NOTE(review): mutable default ``files=[]`` — it is only passed
        through to dirstate.status/match here, but confirm callees never
        mutate it before relying on that.
        """

        def fcmp(fn, getnode):
            # full content comparison of working file vs. stored revision
            t1 = self.wread(fn)
            return self.file(fn).cmp(getnode(fn), t1)

        def mfmatches(node):
            # manifest of `node` restricted to files accepted by `match`
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        # comparing the working dir against its own first parent is the
        # cheap, common case and is handled via dirstate alone
        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # set when we acquire the wlock ourselves (to update dirstate
        # timestamps opportunistically) so we know to release it
        mywlock = False

        # are we comparing the working directory?
        if not node2:
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    # do a full compare of any files that might have changed
                    mnode = self.changelog.read(self.dirstate.parents()[0])[0]
                    getnode = lambda fn: (self.manifest.find(mnode, fn)[0] or
                                          nullid)
                    for f in lookup:
                        if fcmp(f, getnode):
                            modified.append(f)
                        else:
                            if list_clean:
                                clean.append(f)
                            # record the file as clean in the dirstate so the
                            # expensive compare is skipped next time; this is
                            # best-effort, so take the lock non-blocking
                            if not wlock and not mywlock:
                                mywlock = True
                                try:
                                    wlock = self.wlock(wait=0)
                                except lock.LockException:
                                    pass
                            if wlock:
                                self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                is_exec = util.execfunc(self.root, mf2.execf)
                is_link = util.linkfunc(self.root, mf2.linkf)
                for f in lookup + modified + added:
                    # empty hash marks "compare content", see mf2[fn] != ""
                    # test in the manifest comparison below
                    mf2[f] = ""
                    mf2.set(f, is_exec(f), is_link(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]

            if mywlock and wlock:
                wlock.release()
        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            getnode = lambda fn: mf1.get(fn, nullid)
            for fn in mf2keys:
                if mf1.has_key(fn):
                    # changed flags, or changed hash (where an empty mf2
                    # hash means "unknown, do a content compare")
                    if mf1.flags(fn) != mf2.flags(fn) or \
                       (mf1[fn] != mf2[fn] and (mf2[fn] != "" or
                                                fcmp(fn, getnode))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            # whatever is left in mf1 was not seen in mf2: removed
            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)
1020
1019
1021 def add(self, list, wlock=None):
1020 def add(self, list, wlock=None):
1022 if not wlock:
1021 if not wlock:
1023 wlock = self.wlock()
1022 wlock = self.wlock()
1024 for f in list:
1023 for f in list:
1025 p = self.wjoin(f)
1024 p = self.wjoin(f)
1026 try:
1025 try:
1027 st = os.lstat(p)
1026 st = os.lstat(p)
1028 except:
1027 except:
1029 self.ui.warn(_("%s does not exist!\n") % f)
1028 self.ui.warn(_("%s does not exist!\n") % f)
1030 continue
1029 continue
1031 if st.st_size > 10000000:
1030 if st.st_size > 10000000:
1032 self.ui.warn(_("%s: files over 10MB may cause memory and"
1031 self.ui.warn(_("%s: files over 10MB may cause memory and"
1033 " performance problems\n"
1032 " performance problems\n"
1034 "(use 'hg revert %s' to unadd the file)\n")
1033 "(use 'hg revert %s' to unadd the file)\n")
1035 % (f, f))
1034 % (f, f))
1036 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1035 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1037 self.ui.warn(_("%s not added: only files and symlinks "
1036 self.ui.warn(_("%s not added: only files and symlinks "
1038 "supported currently\n") % f)
1037 "supported currently\n") % f)
1039 elif self.dirstate.state(f) in 'an':
1038 elif self.dirstate.state(f) in 'an':
1040 self.ui.warn(_("%s already tracked!\n") % f)
1039 self.ui.warn(_("%s already tracked!\n") % f)
1041 else:
1040 else:
1042 self.dirstate.update([f], "a")
1041 self.dirstate.update([f], "a")
1043
1042
1044 def forget(self, list, wlock=None):
1043 def forget(self, list, wlock=None):
1045 if not wlock:
1044 if not wlock:
1046 wlock = self.wlock()
1045 wlock = self.wlock()
1047 for f in list:
1046 for f in list:
1048 if self.dirstate.state(f) not in 'ai':
1047 if self.dirstate.state(f) not in 'ai':
1049 self.ui.warn(_("%s not added!\n") % f)
1048 self.ui.warn(_("%s not added!\n") % f)
1050 else:
1049 else:
1051 self.dirstate.forget([f])
1050 self.dirstate.forget([f])
1052
1051
    def remove(self, list, unlink=False, wlock=None):
        """Mark the files in `list` as removed ('r') in the dirstate.

        With unlink=True the files are first deleted from the working
        directory (already-missing files are tolerated). Files still in
        'a' state are simply forgotten; untracked files get a warning.
        """
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    # a file that is already gone is fine when unlinking
                    if inst.errno != errno.ENOENT:
                        raise
        if not wlock:
            wlock = self.wlock()
        for f in list:
            if unlink and os.path.exists(self.wjoin(f)):
                # the unlink above did not actually remove it
                self.ui.warn(_("%s still exists!\n") % f)
            elif self.dirstate.state(f) == 'a':
                # never committed: just drop the pending add
                self.dirstate.forget([f])
            elif f not in self.dirstate:
                self.ui.warn(_("%s not tracked!\n") % f)
            else:
                self.dirstate.update([f], "r")
1072
1071
    def undelete(self, list, wlock=None):
        """Restore files that are scheduled for removal ('r' state).

        Rewrites each file's content and exec/link flags from the first
        dirstate parent's manifest, then marks it normal ('n') again.
        Files not in state 'r' only produce a warning.
        """
        p = self.dirstate.parents()[0]
        mn = self.changelog.read(p)[0]
        m = self.manifest.read(mn)
        if not wlock:
            wlock = self.wlock()
        for f in list:
            if self.dirstate.state(f) not in "r":
                self.ui.warn("%s not removed!\n" % f)
            else:
                # restore content and flags from the parent revision
                t = self.file(f).read(m[f])
                self.wwrite(f, t, m.flags(f))
                self.dirstate.update([f], "n")
1086
1085
1087 def copy(self, source, dest, wlock=None):
1086 def copy(self, source, dest, wlock=None):
1088 p = self.wjoin(dest)
1087 p = self.wjoin(dest)
1089 if not (os.path.exists(p) or os.path.islink(p)):
1088 if not (os.path.exists(p) or os.path.islink(p)):
1090 self.ui.warn(_("%s does not exist!\n") % dest)
1089 self.ui.warn(_("%s does not exist!\n") % dest)
1091 elif not (os.path.isfile(p) or os.path.islink(p)):
1090 elif not (os.path.isfile(p) or os.path.islink(p)):
1092 self.ui.warn(_("copy failed: %s is not a file or a "
1091 self.ui.warn(_("copy failed: %s is not a file or a "
1093 "symbolic link\n") % dest)
1092 "symbolic link\n") % dest)
1094 else:
1093 else:
1095 if not wlock:
1094 if not wlock:
1096 wlock = self.wlock()
1095 wlock = self.wlock()
1097 if self.dirstate.state(dest) == '?':
1096 if self.dirstate.state(dest) == '?':
1098 self.dirstate.update([dest], "a")
1097 self.dirstate.update([dest], "a")
1099 self.dirstate.copy(source, dest)
1098 self.dirstate.copy(source, dest)
1100
1099
1101 def heads(self, start=None):
1100 def heads(self, start=None):
1102 heads = self.changelog.heads(start)
1101 heads = self.changelog.heads(start)
1103 # sort the output in rev descending order
1102 # sort the output in rev descending order
1104 heads = [(-self.changelog.rev(h), h) for h in heads]
1103 heads = [(-self.changelog.rev(h), h) for h in heads]
1105 heads.sort()
1104 heads.sort()
1106 return [n for (r, n) in heads]
1105 return [n for (r, n) in heads]
1107
1106
1108 def branches(self, nodes):
1107 def branches(self, nodes):
1109 if not nodes:
1108 if not nodes:
1110 nodes = [self.changelog.tip()]
1109 nodes = [self.changelog.tip()]
1111 b = []
1110 b = []
1112 for n in nodes:
1111 for n in nodes:
1113 t = n
1112 t = n
1114 while 1:
1113 while 1:
1115 p = self.changelog.parents(n)
1114 p = self.changelog.parents(n)
1116 if p[1] != nullid or p[0] == nullid:
1115 if p[1] != nullid or p[0] == nullid:
1117 b.append((t, n, p[0], p[1]))
1116 b.append((t, n, p[0], p[1]))
1118 break
1117 break
1119 n = p[0]
1118 n = p[0]
1120 return b
1119 return b
1121
1120
1122 def between(self, pairs):
1121 def between(self, pairs):
1123 r = []
1122 r = []
1124
1123
1125 for top, bottom in pairs:
1124 for top, bottom in pairs:
1126 n, l, i = top, [], 0
1125 n, l, i = top, [], 0
1127 f = 1
1126 f = 1
1128
1127
1129 while n != bottom:
1128 while n != bottom:
1130 p = self.changelog.parents(n)[0]
1129 p = self.changelog.parents(n)[0]
1131 if i == f:
1130 if i == f:
1132 l.append(n)
1131 l.append(n)
1133 f = f * 2
1132 f = f * 2
1134 n = p
1133 n = p
1135 i += 1
1134 i += 1
1136
1135
1137 r.append(l)
1136 r.append(l)
1138
1137
1139 return r
1138 return r
1140
1139
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exists
        in self and remote but no children exists in self and remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base == None:
            base = {}

        if not heads:
            heads = remote.heads()

        # empty local repo: everything the remote has is missing here
        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    # queue still-unknown parents for the next round trip
                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                # batch remote queries ten branches at a time
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            # `l` holds samples at exponentially growing distances; find
            # the first sampled node we already know and recurse on the
            # sub-range just above it
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        # the only common point being the null revision means the two
        # repositories share no history at all
        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()
1281
1280
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base == None:
            # no common-node info supplied: run discovery to fill `base`
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset
1329
1328
    def pull(self, remote, heads=None, force=False, lock=None):
        """Pull missing changesets from `remote` into this repository.

        heads: optional list of remote heads to limit the pull to
        (requires the remote's 'changegroupsubset' capability).
        Returns addchangegroup's result, or 0 when nothing was found.
        Takes the repository lock unless the caller already holds one.
        """
        mylock = False
        if not lock:
            lock = self.lock()
            mylock = True

        try:
            fetch = self.findincoming(remote, force=force)
            if fetch == [nullid]:
                # our repository is empty relative to the remote
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if 'changegroupsubset' not in remote.capabilities:
                    raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            # only release a lock we acquired ourselves
            if mylock:
                lock.release()
1355
1354
1356 def push(self, remote, force=False, revs=None):
1355 def push(self, remote, force=False, revs=None):
1357 # there are two ways to push to remote repo:
1356 # there are two ways to push to remote repo:
1358 #
1357 #
1359 # addchangegroup assumes local user can lock remote
1358 # addchangegroup assumes local user can lock remote
1360 # repo (local filesystem, old ssh servers).
1359 # repo (local filesystem, old ssh servers).
1361 #
1360 #
1362 # unbundle assumes local user cannot lock remote repo (new ssh
1361 # unbundle assumes local user cannot lock remote repo (new ssh
1363 # servers, http servers).
1362 # servers, http servers).
1364
1363
1365 if remote.capable('unbundle'):
1364 if remote.capable('unbundle'):
1366 return self.push_unbundle(remote, force, revs)
1365 return self.push_unbundle(remote, force, revs)
1367 return self.push_addchangegroup(remote, force, revs)
1366 return self.push_addchangegroup(remote, force, revs)
1368
1367
    def prepush(self, remote, force, revs):
        """Analyse a pending push and build the changegroup to send.

        Returns (changegroup, remote_heads) when there is something to
        push, or (None, errcode) when there is nothing to do or the push
        would create new remote heads without `force`.
        """
        base = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                # pushing into an empty repo can never add heads
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                # simulate the post-push remote head set and compare sizes
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            # no outgoing head descends from r, so r
                            # remains a head after the push
                            newheads.append(r)
                    else:
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1
        elif inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))


        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
1424
1423
1425 def push_addchangegroup(self, remote, force, revs):
1424 def push_addchangegroup(self, remote, force, revs):
1426 lock = remote.lock()
1425 lock = remote.lock()
1427
1426
1428 ret = self.prepush(remote, force, revs)
1427 ret = self.prepush(remote, force, revs)
1429 if ret[0] is not None:
1428 if ret[0] is not None:
1430 cg, remote_heads = ret
1429 cg, remote_heads = ret
1431 return remote.addchangegroup(cg, 'push', self.url())
1430 return remote.addchangegroup(cg, 'push', self.url())
1432 return ret[1]
1431 return ret[1]
1433
1432
1434 def push_unbundle(self, remote, force, revs):
1433 def push_unbundle(self, remote, force, revs):
1435 # local repo finds heads on server, finds out what revs it
1434 # local repo finds heads on server, finds out what revs it
1436 # must push. once revs transferred, if server finds it has
1435 # must push. once revs transferred, if server finds it has
1437 # different heads (someone else won commit/push race), server
1436 # different heads (someone else won commit/push race), server
1438 # aborts.
1437 # aborts.
1439
1438
1440 ret = self.prepush(remote, force, revs)
1439 ret = self.prepush(remote, force, revs)
1441 if ret[0] is not None:
1440 if ret[0] is not None:
1442 cg, remote_heads = ret
1441 cg, remote_heads = ret
1443 if force: remote_heads = ['force']
1442 if force: remote_heads = ['force']
1444 return remote.unbundle(cg, remote_heads, 'push')
1443 return remote.unbundle(cg, remote_heads, 'push')
1445 return ret[1]
1444 return ret[1]
1446
1445
1447 def changegroupinfo(self, nodes):
1446 def changegroupinfo(self, nodes):
1448 self.ui.note(_("%d changesets found\n") % len(nodes))
1447 self.ui.note(_("%d changesets found\n") % len(nodes))
1449 if self.ui.debugflag:
1448 if self.ui.debugflag:
1450 self.ui.debug(_("List of changesets:\n"))
1449 self.ui.debug(_("List of changesets:\n"))
1451 for node in nodes:
1450 for node in nodes:
1452 self.ui.debug("%s\n" % hex(node))
1451 self.ui.debug("%s\n" % hex(node))
1453
1452
1454 def changegroupsubset(self, bases, heads, source):
1453 def changegroupsubset(self, bases, heads, source):
1455 """This function generates a changegroup consisting of all the nodes
1454 """This function generates a changegroup consisting of all the nodes
1456 that are descendents of any of the bases, and ancestors of any of
1455 that are descendents of any of the bases, and ancestors of any of
1457 the heads.
1456 the heads.
1458
1457
1459 It is fairly complex as determining which filenodes and which
1458 It is fairly complex as determining which filenodes and which
1460 manifest nodes need to be included for the changeset to be complete
1459 manifest nodes need to be included for the changeset to be complete
1461 is non-trivial.
1460 is non-trivial.
1462
1461
1463 Another wrinkle is doing the reverse, figuring out which changeset in
1462 Another wrinkle is doing the reverse, figuring out which changeset in
1464 the changegroup a particular filenode or manifestnode belongs to."""
1463 the changegroup a particular filenode or manifestnode belongs to."""
1465
1464
1466 self.hook('preoutgoing', throw=True, source=source)
1465 self.hook('preoutgoing', throw=True, source=source)
1467
1466
1468 # Set up some initial variables
1467 # Set up some initial variables
1469 # Make it easy to refer to self.changelog
1468 # Make it easy to refer to self.changelog
1470 cl = self.changelog
1469 cl = self.changelog
1471 # msng is short for missing - compute the list of changesets in this
1470 # msng is short for missing - compute the list of changesets in this
1472 # changegroup.
1471 # changegroup.
1473 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1472 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1474 self.changegroupinfo(msng_cl_lst)
1473 self.changegroupinfo(msng_cl_lst)
1475 # Some bases may turn out to be superfluous, and some heads may be
1474 # Some bases may turn out to be superfluous, and some heads may be
1476 # too. nodesbetween will return the minimal set of bases and heads
1475 # too. nodesbetween will return the minimal set of bases and heads
1477 # necessary to re-create the changegroup.
1476 # necessary to re-create the changegroup.
1478
1477
1479 # Known heads are the list of heads that it is assumed the recipient
1478 # Known heads are the list of heads that it is assumed the recipient
1480 # of this changegroup will know about.
1479 # of this changegroup will know about.
1481 knownheads = {}
1480 knownheads = {}
1482 # We assume that all parents of bases are known heads.
1481 # We assume that all parents of bases are known heads.
1483 for n in bases:
1482 for n in bases:
1484 for p in cl.parents(n):
1483 for p in cl.parents(n):
1485 if p != nullid:
1484 if p != nullid:
1486 knownheads[p] = 1
1485 knownheads[p] = 1
1487 knownheads = knownheads.keys()
1486 knownheads = knownheads.keys()
1488 if knownheads:
1487 if knownheads:
1489 # Now that we know what heads are known, we can compute which
1488 # Now that we know what heads are known, we can compute which
1490 # changesets are known. The recipient must know about all
1489 # changesets are known. The recipient must know about all
1491 # changesets required to reach the known heads from the null
1490 # changesets required to reach the known heads from the null
1492 # changeset.
1491 # changeset.
1493 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1492 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1494 junk = None
1493 junk = None
1495 # Transform the list into an ersatz set.
1494 # Transform the list into an ersatz set.
1496 has_cl_set = dict.fromkeys(has_cl_set)
1495 has_cl_set = dict.fromkeys(has_cl_set)
1497 else:
1496 else:
1498 # If there were no known heads, the recipient cannot be assumed to
1497 # If there were no known heads, the recipient cannot be assumed to
1499 # know about any changesets.
1498 # know about any changesets.
1500 has_cl_set = {}
1499 has_cl_set = {}
1501
1500
1502 # Make it easy to refer to self.manifest
1501 # Make it easy to refer to self.manifest
1503 mnfst = self.manifest
1502 mnfst = self.manifest
1504 # We don't know which manifests are missing yet
1503 # We don't know which manifests are missing yet
1505 msng_mnfst_set = {}
1504 msng_mnfst_set = {}
1506 # Nor do we know which filenodes are missing.
1505 # Nor do we know which filenodes are missing.
1507 msng_filenode_set = {}
1506 msng_filenode_set = {}
1508
1507
1509 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1508 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1510 junk = None
1509 junk = None
1511
1510
1512 # A changeset always belongs to itself, so the changenode lookup
1511 # A changeset always belongs to itself, so the changenode lookup
1513 # function for a changenode is identity.
1512 # function for a changenode is identity.
1514 def identity(x):
1513 def identity(x):
1515 return x
1514 return x
1516
1515
1517 # A function generating function. Sets up an environment for the
1516 # A function generating function. Sets up an environment for the
1518 # inner function.
1517 # inner function.
1519 def cmp_by_rev_func(revlog):
1518 def cmp_by_rev_func(revlog):
1520 # Compare two nodes by their revision number in the environment's
1519 # Compare two nodes by their revision number in the environment's
1521 # revision history. Since the revision number both represents the
1520 # revision history. Since the revision number both represents the
1522 # most efficient order to read the nodes in, and represents a
1521 # most efficient order to read the nodes in, and represents a
1523 # topological sorting of the nodes, this function is often useful.
1522 # topological sorting of the nodes, this function is often useful.
1524 def cmp_by_rev(a, b):
1523 def cmp_by_rev(a, b):
1525 return cmp(revlog.rev(a), revlog.rev(b))
1524 return cmp(revlog.rev(a), revlog.rev(b))
1526 return cmp_by_rev
1525 return cmp_by_rev
1527
1526
1528 # If we determine that a particular file or manifest node must be a
1527 # If we determine that a particular file or manifest node must be a
1529 # node that the recipient of the changegroup will already have, we can
1528 # node that the recipient of the changegroup will already have, we can
1530 # also assume the recipient will have all the parents. This function
1529 # also assume the recipient will have all the parents. This function
1531 # prunes them from the set of missing nodes.
1530 # prunes them from the set of missing nodes.
1532 def prune_parents(revlog, hasset, msngset):
1531 def prune_parents(revlog, hasset, msngset):
1533 haslst = hasset.keys()
1532 haslst = hasset.keys()
1534 haslst.sort(cmp_by_rev_func(revlog))
1533 haslst.sort(cmp_by_rev_func(revlog))
1535 for node in haslst:
1534 for node in haslst:
1536 parentlst = [p for p in revlog.parents(node) if p != nullid]
1535 parentlst = [p for p in revlog.parents(node) if p != nullid]
1537 while parentlst:
1536 while parentlst:
1538 n = parentlst.pop()
1537 n = parentlst.pop()
1539 if n not in hasset:
1538 if n not in hasset:
1540 hasset[n] = 1
1539 hasset[n] = 1
1541 p = [p for p in revlog.parents(n) if p != nullid]
1540 p = [p for p in revlog.parents(n) if p != nullid]
1542 parentlst.extend(p)
1541 parentlst.extend(p)
1543 for n in hasset:
1542 for n in hasset:
1544 msngset.pop(n, None)
1543 msngset.pop(n, None)
1545
1544
1546 # This is a function generating function used to set up an environment
1545 # This is a function generating function used to set up an environment
1547 # for the inner function to execute in.
1546 # for the inner function to execute in.
1548 def manifest_and_file_collector(changedfileset):
1547 def manifest_and_file_collector(changedfileset):
1549 # This is an information gathering function that gathers
1548 # This is an information gathering function that gathers
1550 # information from each changeset node that goes out as part of
1549 # information from each changeset node that goes out as part of
1551 # the changegroup. The information gathered is a list of which
1550 # the changegroup. The information gathered is a list of which
1552 # manifest nodes are potentially required (the recipient may
1551 # manifest nodes are potentially required (the recipient may
1553 # already have them) and total list of all files which were
1552 # already have them) and total list of all files which were
1554 # changed in any changeset in the changegroup.
1553 # changed in any changeset in the changegroup.
1555 #
1554 #
1556 # We also remember the first changenode we saw any manifest
1555 # We also remember the first changenode we saw any manifest
1557 # referenced by so we can later determine which changenode 'owns'
1556 # referenced by so we can later determine which changenode 'owns'
1558 # the manifest.
1557 # the manifest.
1559 def collect_manifests_and_files(clnode):
1558 def collect_manifests_and_files(clnode):
1560 c = cl.read(clnode)
1559 c = cl.read(clnode)
1561 for f in c[3]:
1560 for f in c[3]:
1562 # This is to make sure we only have one instance of each
1561 # This is to make sure we only have one instance of each
1563 # filename string for each filename.
1562 # filename string for each filename.
1564 changedfileset.setdefault(f, f)
1563 changedfileset.setdefault(f, f)
1565 msng_mnfst_set.setdefault(c[0], clnode)
1564 msng_mnfst_set.setdefault(c[0], clnode)
1566 return collect_manifests_and_files
1565 return collect_manifests_and_files
1567
1566
1568 # Figure out which manifest nodes (of the ones we think might be part
1567 # Figure out which manifest nodes (of the ones we think might be part
1569 # of the changegroup) the recipient must know about and remove them
1568 # of the changegroup) the recipient must know about and remove them
1570 # from the changegroup.
1569 # from the changegroup.
1571 def prune_manifests():
1570 def prune_manifests():
1572 has_mnfst_set = {}
1571 has_mnfst_set = {}
1573 for n in msng_mnfst_set:
1572 for n in msng_mnfst_set:
1574 # If a 'missing' manifest thinks it belongs to a changenode
1573 # If a 'missing' manifest thinks it belongs to a changenode
1575 # the recipient is assumed to have, obviously the recipient
1574 # the recipient is assumed to have, obviously the recipient
1576 # must have that manifest.
1575 # must have that manifest.
1577 linknode = cl.node(mnfst.linkrev(n))
1576 linknode = cl.node(mnfst.linkrev(n))
1578 if linknode in has_cl_set:
1577 if linknode in has_cl_set:
1579 has_mnfst_set[n] = 1
1578 has_mnfst_set[n] = 1
1580 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1579 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1581
1580
1582 # Use the information collected in collect_manifests_and_files to say
1581 # Use the information collected in collect_manifests_and_files to say
1583 # which changenode any manifestnode belongs to.
1582 # which changenode any manifestnode belongs to.
1584 def lookup_manifest_link(mnfstnode):
1583 def lookup_manifest_link(mnfstnode):
1585 return msng_mnfst_set[mnfstnode]
1584 return msng_mnfst_set[mnfstnode]
1586
1585
1587 # A function generating function that sets up the initial environment
1586 # A function generating function that sets up the initial environment
1588 # the inner function.
1587 # the inner function.
1589 def filenode_collector(changedfiles):
1588 def filenode_collector(changedfiles):
1590 next_rev = [0]
1589 next_rev = [0]
1591 # This gathers information from each manifestnode included in the
1590 # This gathers information from each manifestnode included in the
1592 # changegroup about which filenodes the manifest node references
1591 # changegroup about which filenodes the manifest node references
1593 # so we can include those in the changegroup too.
1592 # so we can include those in the changegroup too.
1594 #
1593 #
1595 # It also remembers which changenode each filenode belongs to. It
1594 # It also remembers which changenode each filenode belongs to. It
1596 # does this by assuming the a filenode belongs to the changenode
1595 # does this by assuming the a filenode belongs to the changenode
1597 # the first manifest that references it belongs to.
1596 # the first manifest that references it belongs to.
1598 def collect_msng_filenodes(mnfstnode):
1597 def collect_msng_filenodes(mnfstnode):
1599 r = mnfst.rev(mnfstnode)
1598 r = mnfst.rev(mnfstnode)
1600 if r == next_rev[0]:
1599 if r == next_rev[0]:
1601 # If the last rev we looked at was the one just previous,
1600 # If the last rev we looked at was the one just previous,
1602 # we only need to see a diff.
1601 # we only need to see a diff.
1603 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1602 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1604 # For each line in the delta
1603 # For each line in the delta
1605 for dline in delta.splitlines():
1604 for dline in delta.splitlines():
1606 # get the filename and filenode for that line
1605 # get the filename and filenode for that line
1607 f, fnode = dline.split('\0')
1606 f, fnode = dline.split('\0')
1608 fnode = bin(fnode[:40])
1607 fnode = bin(fnode[:40])
1609 f = changedfiles.get(f, None)
1608 f = changedfiles.get(f, None)
1610 # And if the file is in the list of files we care
1609 # And if the file is in the list of files we care
1611 # about.
1610 # about.
1612 if f is not None:
1611 if f is not None:
1613 # Get the changenode this manifest belongs to
1612 # Get the changenode this manifest belongs to
1614 clnode = msng_mnfst_set[mnfstnode]
1613 clnode = msng_mnfst_set[mnfstnode]
1615 # Create the set of filenodes for the file if
1614 # Create the set of filenodes for the file if
1616 # there isn't one already.
1615 # there isn't one already.
1617 ndset = msng_filenode_set.setdefault(f, {})
1616 ndset = msng_filenode_set.setdefault(f, {})
1618 # And set the filenode's changelog node to the
1617 # And set the filenode's changelog node to the
1619 # manifest's if it hasn't been set already.
1618 # manifest's if it hasn't been set already.
1620 ndset.setdefault(fnode, clnode)
1619 ndset.setdefault(fnode, clnode)
1621 else:
1620 else:
1622 # Otherwise we need a full manifest.
1621 # Otherwise we need a full manifest.
1623 m = mnfst.read(mnfstnode)
1622 m = mnfst.read(mnfstnode)
1624 # For every file in we care about.
1623 # For every file in we care about.
1625 for f in changedfiles:
1624 for f in changedfiles:
1626 fnode = m.get(f, None)
1625 fnode = m.get(f, None)
1627 # If it's in the manifest
1626 # If it's in the manifest
1628 if fnode is not None:
1627 if fnode is not None:
1629 # See comments above.
1628 # See comments above.
1630 clnode = msng_mnfst_set[mnfstnode]
1629 clnode = msng_mnfst_set[mnfstnode]
1631 ndset = msng_filenode_set.setdefault(f, {})
1630 ndset = msng_filenode_set.setdefault(f, {})
1632 ndset.setdefault(fnode, clnode)
1631 ndset.setdefault(fnode, clnode)
1633 # Remember the revision we hope to see next.
1632 # Remember the revision we hope to see next.
1634 next_rev[0] = r + 1
1633 next_rev[0] = r + 1
1635 return collect_msng_filenodes
1634 return collect_msng_filenodes
1636
1635
1637 # We have a list of filenodes we think we need for a file, lets remove
1636 # We have a list of filenodes we think we need for a file, lets remove
1638 # all those we now the recipient must have.
1637 # all those we now the recipient must have.
1639 def prune_filenodes(f, filerevlog):
1638 def prune_filenodes(f, filerevlog):
1640 msngset = msng_filenode_set[f]
1639 msngset = msng_filenode_set[f]
1641 hasset = {}
1640 hasset = {}
1642 # If a 'missing' filenode thinks it belongs to a changenode we
1641 # If a 'missing' filenode thinks it belongs to a changenode we
1643 # assume the recipient must have, then the recipient must have
1642 # assume the recipient must have, then the recipient must have
1644 # that filenode.
1643 # that filenode.
1645 for n in msngset:
1644 for n in msngset:
1646 clnode = cl.node(filerevlog.linkrev(n))
1645 clnode = cl.node(filerevlog.linkrev(n))
1647 if clnode in has_cl_set:
1646 if clnode in has_cl_set:
1648 hasset[n] = 1
1647 hasset[n] = 1
1649 prune_parents(filerevlog, hasset, msngset)
1648 prune_parents(filerevlog, hasset, msngset)
1650
1649
1651 # A function generator function that sets up the a context for the
1650 # A function generator function that sets up the a context for the
1652 # inner function.
1651 # inner function.
1653 def lookup_filenode_link_func(fname):
1652 def lookup_filenode_link_func(fname):
1654 msngset = msng_filenode_set[fname]
1653 msngset = msng_filenode_set[fname]
1655 # Lookup the changenode the filenode belongs to.
1654 # Lookup the changenode the filenode belongs to.
1656 def lookup_filenode_link(fnode):
1655 def lookup_filenode_link(fnode):
1657 return msngset[fnode]
1656 return msngset[fnode]
1658 return lookup_filenode_link
1657 return lookup_filenode_link
1659
1658
1660 # Now that we have all theses utility functions to help out and
1659 # Now that we have all theses utility functions to help out and
1661 # logically divide up the task, generate the group.
1660 # logically divide up the task, generate the group.
1662 def gengroup():
1661 def gengroup():
1663 # The set of changed files starts empty.
1662 # The set of changed files starts empty.
1664 changedfiles = {}
1663 changedfiles = {}
1665 # Create a changenode group generator that will call our functions
1664 # Create a changenode group generator that will call our functions
1666 # back to lookup the owning changenode and collect information.
1665 # back to lookup the owning changenode and collect information.
1667 group = cl.group(msng_cl_lst, identity,
1666 group = cl.group(msng_cl_lst, identity,
1668 manifest_and_file_collector(changedfiles))
1667 manifest_and_file_collector(changedfiles))
1669 for chnk in group:
1668 for chnk in group:
1670 yield chnk
1669 yield chnk
1671
1670
1672 # The list of manifests has been collected by the generator
1671 # The list of manifests has been collected by the generator
1673 # calling our functions back.
1672 # calling our functions back.
1674 prune_manifests()
1673 prune_manifests()
1675 msng_mnfst_lst = msng_mnfst_set.keys()
1674 msng_mnfst_lst = msng_mnfst_set.keys()
1676 # Sort the manifestnodes by revision number.
1675 # Sort the manifestnodes by revision number.
1677 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1676 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1678 # Create a generator for the manifestnodes that calls our lookup
1677 # Create a generator for the manifestnodes that calls our lookup
1679 # and data collection functions back.
1678 # and data collection functions back.
1680 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1679 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1681 filenode_collector(changedfiles))
1680 filenode_collector(changedfiles))
1682 for chnk in group:
1681 for chnk in group:
1683 yield chnk
1682 yield chnk
1684
1683
1685 # These are no longer needed, dereference and toss the memory for
1684 # These are no longer needed, dereference and toss the memory for
1686 # them.
1685 # them.
1687 msng_mnfst_lst = None
1686 msng_mnfst_lst = None
1688 msng_mnfst_set.clear()
1687 msng_mnfst_set.clear()
1689
1688
1690 changedfiles = changedfiles.keys()
1689 changedfiles = changedfiles.keys()
1691 changedfiles.sort()
1690 changedfiles.sort()
1692 # Go through all our files in order sorted by name.
1691 # Go through all our files in order sorted by name.
1693 for fname in changedfiles:
1692 for fname in changedfiles:
1694 filerevlog = self.file(fname)
1693 filerevlog = self.file(fname)
1695 # Toss out the filenodes that the recipient isn't really
1694 # Toss out the filenodes that the recipient isn't really
1696 # missing.
1695 # missing.
1697 if msng_filenode_set.has_key(fname):
1696 if msng_filenode_set.has_key(fname):
1698 prune_filenodes(fname, filerevlog)
1697 prune_filenodes(fname, filerevlog)
1699 msng_filenode_lst = msng_filenode_set[fname].keys()
1698 msng_filenode_lst = msng_filenode_set[fname].keys()
1700 else:
1699 else:
1701 msng_filenode_lst = []
1700 msng_filenode_lst = []
1702 # If any filenodes are left, generate the group for them,
1701 # If any filenodes are left, generate the group for them,
1703 # otherwise don't bother.
1702 # otherwise don't bother.
1704 if len(msng_filenode_lst) > 0:
1703 if len(msng_filenode_lst) > 0:
1705 yield changegroup.genchunk(fname)
1704 yield changegroup.genchunk(fname)
1706 # Sort the filenodes by their revision #
1705 # Sort the filenodes by their revision #
1707 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1706 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1708 # Create a group generator and only pass in a changenode
1707 # Create a group generator and only pass in a changenode
1709 # lookup function as we need to collect no information
1708 # lookup function as we need to collect no information
1710 # from filenodes.
1709 # from filenodes.
1711 group = filerevlog.group(msng_filenode_lst,
1710 group = filerevlog.group(msng_filenode_lst,
1712 lookup_filenode_link_func(fname))
1711 lookup_filenode_link_func(fname))
1713 for chnk in group:
1712 for chnk in group:
1714 yield chnk
1713 yield chnk
1715 if msng_filenode_set.has_key(fname):
1714 if msng_filenode_set.has_key(fname):
1716 # Don't need this anymore, toss it to free memory.
1715 # Don't need this anymore, toss it to free memory.
1717 del msng_filenode_set[fname]
1716 del msng_filenode_set[fname]
1718 # Signal that no more groups are left.
1717 # Signal that no more groups are left.
1719 yield changegroup.closechunk()
1718 yield changegroup.closechunk()
1720
1719
1721 if msng_cl_lst:
1720 if msng_cl_lst:
1722 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1721 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1723
1722
1724 return util.chunkbuffer(gengroup())
1723 return util.chunkbuffer(gengroup())
1725
1724
1726 def changegroup(self, basenodes, source):
1725 def changegroup(self, basenodes, source):
1727 """Generate a changegroup of all nodes that we have that a recipient
1726 """Generate a changegroup of all nodes that we have that a recipient
1728 doesn't.
1727 doesn't.
1729
1728
1730 This is much easier than the previous function as we can assume that
1729 This is much easier than the previous function as we can assume that
1731 the recipient has any changenode we aren't sending them."""
1730 the recipient has any changenode we aren't sending them."""
1732
1731
1733 self.hook('preoutgoing', throw=True, source=source)
1732 self.hook('preoutgoing', throw=True, source=source)
1734
1733
1735 cl = self.changelog
1734 cl = self.changelog
1736 nodes = cl.nodesbetween(basenodes, None)[0]
1735 nodes = cl.nodesbetween(basenodes, None)[0]
1737 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1736 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1738 self.changegroupinfo(nodes)
1737 self.changegroupinfo(nodes)
1739
1738
1740 def identity(x):
1739 def identity(x):
1741 return x
1740 return x
1742
1741
1743 def gennodelst(revlog):
1742 def gennodelst(revlog):
1744 for r in xrange(0, revlog.count()):
1743 for r in xrange(0, revlog.count()):
1745 n = revlog.node(r)
1744 n = revlog.node(r)
1746 if revlog.linkrev(n) in revset:
1745 if revlog.linkrev(n) in revset:
1747 yield n
1746 yield n
1748
1747
1749 def changed_file_collector(changedfileset):
1748 def changed_file_collector(changedfileset):
1750 def collect_changed_files(clnode):
1749 def collect_changed_files(clnode):
1751 c = cl.read(clnode)
1750 c = cl.read(clnode)
1752 for fname in c[3]:
1751 for fname in c[3]:
1753 changedfileset[fname] = 1
1752 changedfileset[fname] = 1
1754 return collect_changed_files
1753 return collect_changed_files
1755
1754
1756 def lookuprevlink_func(revlog):
1755 def lookuprevlink_func(revlog):
1757 def lookuprevlink(n):
1756 def lookuprevlink(n):
1758 return cl.node(revlog.linkrev(n))
1757 return cl.node(revlog.linkrev(n))
1759 return lookuprevlink
1758 return lookuprevlink
1760
1759
1761 def gengroup():
1760 def gengroup():
1762 # construct a list of all changed files
1761 # construct a list of all changed files
1763 changedfiles = {}
1762 changedfiles = {}
1764
1763
1765 for chnk in cl.group(nodes, identity,
1764 for chnk in cl.group(nodes, identity,
1766 changed_file_collector(changedfiles)):
1765 changed_file_collector(changedfiles)):
1767 yield chnk
1766 yield chnk
1768 changedfiles = changedfiles.keys()
1767 changedfiles = changedfiles.keys()
1769 changedfiles.sort()
1768 changedfiles.sort()
1770
1769
1771 mnfst = self.manifest
1770 mnfst = self.manifest
1772 nodeiter = gennodelst(mnfst)
1771 nodeiter = gennodelst(mnfst)
1773 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1772 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1774 yield chnk
1773 yield chnk
1775
1774
1776 for fname in changedfiles:
1775 for fname in changedfiles:
1777 filerevlog = self.file(fname)
1776 filerevlog = self.file(fname)
1778 nodeiter = gennodelst(filerevlog)
1777 nodeiter = gennodelst(filerevlog)
1779 nodeiter = list(nodeiter)
1778 nodeiter = list(nodeiter)
1780 if nodeiter:
1779 if nodeiter:
1781 yield changegroup.genchunk(fname)
1780 yield changegroup.genchunk(fname)
1782 lookup = lookuprevlink_func(filerevlog)
1781 lookup = lookuprevlink_func(filerevlog)
1783 for chnk in filerevlog.group(nodeiter, lookup):
1782 for chnk in filerevlog.group(nodeiter, lookup):
1784 yield chnk
1783 yield chnk
1785
1784
1786 yield changegroup.closechunk()
1785 yield changegroup.closechunk()
1787
1786
1788 if nodes:
1787 if nodes:
1789 self.hook('outgoing', node=hex(nodes[0]), source=source)
1788 self.hook('outgoing', node=hex(nodes[0]), source=source)
1790
1789
1791 return util.chunkbuffer(gengroup())
1790 return util.chunkbuffer(gengroup())
1792
1791
1793 def addchangegroup(self, source, srctype, url):
1792 def addchangegroup(self, source, srctype, url):
1794 """add changegroup to repo.
1793 """add changegroup to repo.
1795
1794
1796 return values:
1795 return values:
1797 - nothing changed or no source: 0
1796 - nothing changed or no source: 0
1798 - more heads than before: 1+added heads (2..n)
1797 - more heads than before: 1+added heads (2..n)
1799 - less heads than before: -1-removed heads (-2..-n)
1798 - less heads than before: -1-removed heads (-2..-n)
1800 - number of heads stays the same: 1
1799 - number of heads stays the same: 1
1801 """
1800 """
1802 def csmap(x):
1801 def csmap(x):
1803 self.ui.debug(_("add changeset %s\n") % short(x))
1802 self.ui.debug(_("add changeset %s\n") % short(x))
1804 return cl.count()
1803 return cl.count()
1805
1804
1806 def revmap(x):
1805 def revmap(x):
1807 return cl.rev(x)
1806 return cl.rev(x)
1808
1807
1809 if not source:
1808 if not source:
1810 return 0
1809 return 0
1811
1810
1812 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1811 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1813
1812
1814 changesets = files = revisions = 0
1813 changesets = files = revisions = 0
1815
1814
1816 tr = self.transaction()
1815 tr = self.transaction()
1817
1816
1818 # write changelog data to temp files so concurrent readers will not see
1817 # write changelog data to temp files so concurrent readers will not see
1819 # inconsistent view
1818 # inconsistent view
1820 cl = self.changelog
1819 cl = self.changelog
1821 cl.delayupdate()
1820 cl.delayupdate()
1822 oldheads = len(cl.heads())
1821 oldheads = len(cl.heads())
1823
1822
1824 # pull off the changeset group
1823 # pull off the changeset group
1825 self.ui.status(_("adding changesets\n"))
1824 self.ui.status(_("adding changesets\n"))
1826 cor = cl.count() - 1
1825 cor = cl.count() - 1
1827 chunkiter = changegroup.chunkiter(source)
1826 chunkiter = changegroup.chunkiter(source)
1828 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1827 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1829 raise util.Abort(_("received changelog group is empty"))
1828 raise util.Abort(_("received changelog group is empty"))
1830 cnr = cl.count() - 1
1829 cnr = cl.count() - 1
1831 changesets = cnr - cor
1830 changesets = cnr - cor
1832
1831
1833 # pull off the manifest group
1832 # pull off the manifest group
1834 self.ui.status(_("adding manifests\n"))
1833 self.ui.status(_("adding manifests\n"))
1835 chunkiter = changegroup.chunkiter(source)
1834 chunkiter = changegroup.chunkiter(source)
1836 # no need to check for empty manifest group here:
1835 # no need to check for empty manifest group here:
1837 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1836 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1838 # no new manifest will be created and the manifest group will
1837 # no new manifest will be created and the manifest group will
1839 # be empty during the pull
1838 # be empty during the pull
1840 self.manifest.addgroup(chunkiter, revmap, tr)
1839 self.manifest.addgroup(chunkiter, revmap, tr)
1841
1840
1842 # process the files
1841 # process the files
1843 self.ui.status(_("adding file changes\n"))
1842 self.ui.status(_("adding file changes\n"))
1844 while 1:
1843 while 1:
1845 f = changegroup.getchunk(source)
1844 f = changegroup.getchunk(source)
1846 if not f:
1845 if not f:
1847 break
1846 break
1848 self.ui.debug(_("adding %s revisions\n") % f)
1847 self.ui.debug(_("adding %s revisions\n") % f)
1849 fl = self.file(f)
1848 fl = self.file(f)
1850 o = fl.count()
1849 o = fl.count()
1851 chunkiter = changegroup.chunkiter(source)
1850 chunkiter = changegroup.chunkiter(source)
1852 if fl.addgroup(chunkiter, revmap, tr) is None:
1851 if fl.addgroup(chunkiter, revmap, tr) is None:
1853 raise util.Abort(_("received file revlog group is empty"))
1852 raise util.Abort(_("received file revlog group is empty"))
1854 revisions += fl.count() - o
1853 revisions += fl.count() - o
1855 files += 1
1854 files += 1
1856
1855
1857 # make changelog see real files again
1856 # make changelog see real files again
1858 cl.finalize(tr)
1857 cl.finalize(tr)
1859
1858
1860 newheads = len(self.changelog.heads())
1859 newheads = len(self.changelog.heads())
1861 heads = ""
1860 heads = ""
1862 if oldheads and newheads != oldheads:
1861 if oldheads and newheads != oldheads:
1863 heads = _(" (%+d heads)") % (newheads - oldheads)
1862 heads = _(" (%+d heads)") % (newheads - oldheads)
1864
1863
1865 self.ui.status(_("added %d changesets"
1864 self.ui.status(_("added %d changesets"
1866 " with %d changes to %d files%s\n")
1865 " with %d changes to %d files%s\n")
1867 % (changesets, revisions, files, heads))
1866 % (changesets, revisions, files, heads))
1868
1867
1869 if changesets > 0:
1868 if changesets > 0:
1870 self.hook('pretxnchangegroup', throw=True,
1869 self.hook('pretxnchangegroup', throw=True,
1871 node=hex(self.changelog.node(cor+1)), source=srctype,
1870 node=hex(self.changelog.node(cor+1)), source=srctype,
1872 url=url)
1871 url=url)
1873
1872
1874 tr.close()
1873 tr.close()
1875
1874
1876 if changesets > 0:
1875 if changesets > 0:
1877 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1876 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1878 source=srctype, url=url)
1877 source=srctype, url=url)
1879
1878
1880 for i in xrange(cor + 1, cnr + 1):
1879 for i in xrange(cor + 1, cnr + 1):
1881 self.hook("incoming", node=hex(self.changelog.node(i)),
1880 self.hook("incoming", node=hex(self.changelog.node(i)),
1882 source=srctype, url=url)
1881 source=srctype, url=url)
1883
1882
1884 # never return 0 here:
1883 # never return 0 here:
1885 if newheads < oldheads:
1884 if newheads < oldheads:
1886 return newheads - oldheads - 1
1885 return newheads - oldheads - 1
1887 else:
1886 else:
1888 return newheads - oldheads + 1
1887 return newheads - oldheads + 1
1889
1888
1890
1889
1891 def stream_in(self, remote):
1890 def stream_in(self, remote):
1892 fp = remote.stream_out()
1891 fp = remote.stream_out()
1893 l = fp.readline()
1892 l = fp.readline()
1894 try:
1893 try:
1895 resp = int(l)
1894 resp = int(l)
1896 except ValueError:
1895 except ValueError:
1897 raise util.UnexpectedOutput(
1896 raise util.UnexpectedOutput(
1898 _('Unexpected response from remote server:'), l)
1897 _('Unexpected response from remote server:'), l)
1899 if resp == 1:
1898 if resp == 1:
1900 raise util.Abort(_('operation forbidden by server'))
1899 raise util.Abort(_('operation forbidden by server'))
1901 elif resp == 2:
1900 elif resp == 2:
1902 raise util.Abort(_('locking the remote repository failed'))
1901 raise util.Abort(_('locking the remote repository failed'))
1903 elif resp != 0:
1902 elif resp != 0:
1904 raise util.Abort(_('the server sent an unknown error code'))
1903 raise util.Abort(_('the server sent an unknown error code'))
1905 self.ui.status(_('streaming all changes\n'))
1904 self.ui.status(_('streaming all changes\n'))
1906 l = fp.readline()
1905 l = fp.readline()
1907 try:
1906 try:
1908 total_files, total_bytes = map(int, l.split(' ', 1))
1907 total_files, total_bytes = map(int, l.split(' ', 1))
1909 except ValueError, TypeError:
1908 except ValueError, TypeError:
1910 raise util.UnexpectedOutput(
1909 raise util.UnexpectedOutput(
1911 _('Unexpected response from remote server:'), l)
1910 _('Unexpected response from remote server:'), l)
1912 self.ui.status(_('%d files to transfer, %s of data\n') %
1911 self.ui.status(_('%d files to transfer, %s of data\n') %
1913 (total_files, util.bytecount(total_bytes)))
1912 (total_files, util.bytecount(total_bytes)))
1914 start = time.time()
1913 start = time.time()
1915 for i in xrange(total_files):
1914 for i in xrange(total_files):
1916 # XXX doesn't support '\n' or '\r' in filenames
1915 # XXX doesn't support '\n' or '\r' in filenames
1917 l = fp.readline()
1916 l = fp.readline()
1918 try:
1917 try:
1919 name, size = l.split('\0', 1)
1918 name, size = l.split('\0', 1)
1920 size = int(size)
1919 size = int(size)
1921 except ValueError, TypeError:
1920 except ValueError, TypeError:
1922 raise util.UnexpectedOutput(
1921 raise util.UnexpectedOutput(
1923 _('Unexpected response from remote server:'), l)
1922 _('Unexpected response from remote server:'), l)
1924 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1923 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1925 ofp = self.sopener(name, 'w')
1924 ofp = self.sopener(name, 'w')
1926 for chunk in util.filechunkiter(fp, limit=size):
1925 for chunk in util.filechunkiter(fp, limit=size):
1927 ofp.write(chunk)
1926 ofp.write(chunk)
1928 ofp.close()
1927 ofp.close()
1929 elapsed = time.time() - start
1928 elapsed = time.time() - start
1930 if elapsed <= 0:
1929 if elapsed <= 0:
1931 elapsed = 0.001
1930 elapsed = 0.001
1932 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1931 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1933 (util.bytecount(total_bytes), elapsed,
1932 (util.bytecount(total_bytes), elapsed,
1934 util.bytecount(total_bytes / elapsed)))
1933 util.bytecount(total_bytes / elapsed)))
1935 self.reload()
1934 self.invalidate()
1936 return len(self.heads()) + 1
1935 return len(self.heads()) + 1
1937
1936
1938 def clone(self, remote, heads=[], stream=False):
1937 def clone(self, remote, heads=[], stream=False):
1939 '''clone remote repository.
1938 '''clone remote repository.
1940
1939
1941 keyword arguments:
1940 keyword arguments:
1942 heads: list of revs to clone (forces use of pull)
1941 heads: list of revs to clone (forces use of pull)
1943 stream: use streaming clone if possible'''
1942 stream: use streaming clone if possible'''
1944
1943
1945 # now, all clients that can request uncompressed clones can
1944 # now, all clients that can request uncompressed clones can
1946 # read repo formats supported by all servers that can serve
1945 # read repo formats supported by all servers that can serve
1947 # them.
1946 # them.
1948
1947
1949 # if revlog format changes, client will have to check version
1948 # if revlog format changes, client will have to check version
1950 # and format flags on "stream" capability, and use
1949 # and format flags on "stream" capability, and use
1951 # uncompressed only if compatible.
1950 # uncompressed only if compatible.
1952
1951
1953 if stream and not heads and remote.capable('stream'):
1952 if stream and not heads and remote.capable('stream'):
1954 return self.stream_in(remote)
1953 return self.stream_in(remote)
1955 return self.pull(remote, heads)
1954 return self.pull(remote, heads)
1956
1955
1957 # used to avoid circular references so destructors work
1956 # used to avoid circular references so destructors work
1958 def aftertrans(files):
1957 def aftertrans(files):
1959 renamefiles = [tuple(t) for t in files]
1958 renamefiles = [tuple(t) for t in files]
1960 def a():
1959 def a():
1961 for src, dest in renamefiles:
1960 for src, dest in renamefiles:
1962 util.rename(src, dest)
1961 util.rename(src, dest)
1963 return a
1962 return a
1964
1963
1965 def instance(ui, path, create):
1964 def instance(ui, path, create):
1966 return localrepository(ui, util.drop_scheme('file', path), create)
1965 return localrepository(ui, util.drop_scheme('file', path), create)
1967
1966
1968 def islocal(path):
1967 def islocal(path):
1969 return True
1968 return True
General Comments 0
You need to be logged in to leave comments. Login now