##// END OF EJS Templates
localrepo: factor _findtags() out of tags() (issue548)....
Greg Ward -
r9145:6b03f93b default
parent child Browse files
Show More
@@ -1,333 +1,330 b''
1 1 # Mercurial extension to provide the 'hg bookmark' command
2 2 #
3 3 # Copyright 2008 David Soria Parra <dsp@php.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2, incorporated herein by reference.
7 7
8 8 '''track a line of development with movable markers
9 9
10 10 Bookmarks are local movable markers to changesets. Every bookmark points to a
11 11 changeset identified by its hash. If you commit a changeset that is based on a
12 12 changeset that has a bookmark on it, the bookmark shifts to the new changeset.
13 13
14 14 It is possible to use bookmark names in every revision lookup (e.g. hg merge,
15 15 hg update).
16 16
17 17 By default, when several bookmarks point to the same changeset, they will all
18 18 move forward together. It is possible to obtain a more git-like experience by
19 19 adding the following configuration option to your .hgrc:
20 20
21 21 [bookmarks]
22 22 track.current = True
23 23
24 24 This will cause Mercurial to track the bookmark that you are currently using,
25 25 and only update it. This is similar to git's approach to branching.
26 26 '''
27 27
28 28 from mercurial.i18n import _
29 29 from mercurial.node import nullid, nullrev, hex, short
30 30 from mercurial import util, commands, localrepo, repair, extensions
31 31 import os
32 32
def parse(repo):
    '''Parse .hg/bookmarks file and return a dictionary

    Bookmarks are stored as {HASH}\\s{NAME}\\n (localtags format) values
    in the .hg/bookmarks file. They are read by the parse() method and
    returned as a dictionary with name => hash values.

    The parsed dictionary is cached until a write() operation is done.
    '''
    try:
        if repo._bookmarks:
            return repo._bookmarks
        repo._bookmarks = {}
        for line in repo.opener('bookmarks'):
            sha, refspec = line.strip().split(' ', 1)
            repo._bookmarks[refspec] = repo.lookup(sha)
    except Exception:
        # A missing bookmarks file, a malformed line, or an
        # unresolvable hash is treated as "no bookmarks" on purpose.
        # Catch Exception instead of a bare except so that
        # KeyboardInterrupt/SystemExit still propagate.
        pass
    return repo._bookmarks
52 52
def write(repo, refs):
    '''Write bookmarks

    Write the given bookmark => hash dictionary to the .hg/bookmarks file
    in a format equal to those of localtags.

    We also store a backup of the previous state in undo.bookmarks that
    can be copied back on rollback.
    '''
    marksfile = repo.join('bookmarks')
    # preserve the previous state so rollback() can restore it
    if os.path.exists(marksfile):
        util.copyfile(marksfile, repo.join('undo.bookmarks'))
    # if the currently-tracked bookmark is going away, stop tracking it
    if current(repo) not in refs:
        setcurrent(repo, None)
    wlock = repo.wlock()
    try:
        fp = repo.opener('bookmarks', 'w', atomictemp=True)
        for name, node in refs.iteritems():
            fp.write("%s %s\n" % (hex(node), name))
        fp.rename()
    finally:
        wlock.release()
74 74
def current(repo):
    '''Get the current bookmark

    If we use gittish branches we have a current bookmark that
    we are on. This function returns the name of the bookmark. It
    is stored in .hg/bookmarks.current
    '''
    if repo._bookmarkcurrent:
        return repo._bookmarkcurrent
    mark = None
    if os.path.exists(repo.join('bookmarks.current')):
        fp = repo.opener('bookmarks.current')
        # No readline() in posixfile_nt, reading everything is cheap
        lines = fp.readlines()
        fp.close()
        if lines and lines[0] != '':
            mark = lines[0]
    # cache the result (None means "no current bookmark")
    repo._bookmarkcurrent = mark
    return mark
94 94
def setcurrent(repo, mark):
    '''Set the name of the bookmark that we are currently on

    Set the name of the bookmark that we are on (hg update <bookmark>).
    The name is recorded in .hg/bookmarks.current
    '''
    if current(repo) == mark:
        return

    refs = parse(repo)

    # do not update if we do update to a rev equal to the current bookmark
    cur = current(repo)
    if (mark and mark not in refs and
        cur and refs[cur] == repo.changectx('.').node()):
        return
    # an unknown bookmark name clears the current marker
    if mark not in refs:
        mark = ''
    wlock = repo.wlock()
    try:
        fp = repo.opener('bookmarks.current', 'w', atomictemp=True)
        fp.write(mark)
        fp.rename()
    finally:
        wlock.release()
    repo._bookmarkcurrent = mark
120 120
def bookmark(ui, repo, mark=None, rev=None, force=False, delete=False, rename=None):
    '''track a line of development with movable markers

    Bookmarks are pointers to certain commits that move when committing.
    Bookmarks are local. They can be renamed, copied and deleted. It is
    possible to use bookmark names in 'hg merge' and 'hg update' to merge and
    update respectively to a given bookmark.

    You can use 'hg bookmark NAME' to set a bookmark on the working
    directory's parent revision with the given name. If you specify a revision
    using -r REV (where REV may be an existing bookmark), the bookmark is
    assigned to that revision.
    '''
    hexfn = ui.debugflag and hex or short
    marks = parse(repo)
    cur = repo.changectx('.').node()

    if rename:
        if rename not in marks:
            raise util.Abort(_("a bookmark of this name does not exist"))
        if mark in marks and not force:
            raise util.Abort(_("a bookmark of the same name already exists"))
        if mark is None:
            raise util.Abort(_("new bookmark name required"))
        marks[mark] = marks[rename]
        del marks[rename]
        # keep the "current" marker pointing at the renamed bookmark
        if current(repo) == rename:
            setcurrent(repo, mark)
        write(repo, marks)
        return

    if delete:
        if mark is None:
            raise util.Abort(_("bookmark name required"))
        if mark not in marks:
            raise util.Abort(_("a bookmark of this name does not exist"))
        if mark == current(repo):
            setcurrent(repo, None)
        del marks[mark]
        write(repo, marks)
        return

    if mark is not None:  # 'is not None', not '!= None' (identity test)
        if "\n" in mark:
            raise util.Abort(_("bookmark name cannot contain newlines"))
        mark = mark.strip()
        if mark in marks and not force:
            raise util.Abort(_("a bookmark of the same name already exists"))
        if ((mark in repo.branchtags() or mark == repo.dirstate.branch())
            and not force):
            raise util.Abort(
                _("a bookmark cannot have the name of an existing branch"))
        if rev:
            marks[mark] = repo.lookup(rev)
        else:
            marks[mark] = repo.changectx('.').node()
        setcurrent(repo, mark)
        write(repo, marks)
        return

    # no arguments: list all bookmarks
    if mark is None:
        if rev:
            raise util.Abort(_("bookmark name required"))
        if not marks:
            ui.status("no bookmarks set\n")
        else:
            for bmark, n in marks.iteritems():
                # '*' flags the bookmark the working directory is on
                if ui.configbool('bookmarks', 'track.current'):
                    prefix = (bmark == current(repo) and n == cur) and '*' or ' '
                else:
                    prefix = (n == cur) and '*' or ' '

                ui.write(" %s %-25s %d:%s\n" % (
                    prefix, bmark, repo.changelog.rev(n), hexfn(n)))
        return
196 196
197 197 def _revstostrip(changelog, node):
198 198 srev = changelog.rev(node)
199 199 tostrip = [srev]
200 200 saveheads = []
201 201 for r in xrange(srev, len(changelog)):
202 202 parents = changelog.parentrevs(r)
203 203 if parents[0] in tostrip or parents[1] in tostrip:
204 204 tostrip.append(r)
205 205 if parents[1] != nullrev:
206 206 for p in parents:
207 207 if p not in tostrip and p > srev:
208 208 saveheads.append(p)
209 209 return [r for r in tostrip if r not in saveheads]
210 210
def strip(oldstrip, ui, repo, node, backup="all"):
    """Strip bookmarks if revisions are stripped using
    the mercurial.strip method. This usually happens during
    qpush and qpop"""
    revisions = _revstostrip(repo.changelog, node)
    marks = parse(repo)
    # collect bookmarks that point into the stripped range
    update = []
    for mark, n in marks.iteritems():
        if repo.changelog.rev(n) in revisions:
            update.append(mark)
    oldstrip(ui, repo, node, backup)
    if update:  # truthiness instead of len(...) > 0
        # re-point stripped bookmarks at the new working dir parent
        for m in update:
            marks[m] = repo.changectx('.').node()
        write(repo, marks)
226 226
def reposetup(ui, repo):
    """Wrap the local repository class so commits, pulls, rollbacks and
    lookups keep bookmarks in sync."""
    if not isinstance(repo, localrepo.localrepository):
        return

    # init a bookmark cache as otherwise we would get a infinite reading
    # in lookup()
    repo._bookmarks = None
    repo._bookmarkcurrent = None

    class bookmark_repo(repo.__class__):
        def rollback(self):
            # restore the pre-transaction bookmarks saved by write()
            if os.path.exists(self.join('undo.bookmarks')):
                util.rename(self.join('undo.bookmarks'), self.join('bookmarks'))
            return super(bookmark_repo, self).rollback()

        def lookup(self, key):
            # resolve a bookmark name to its node before the normal lookup
            if self._bookmarks is None:
                self._bookmarks = parse(self)
            if key in self._bookmarks:
                key = self._bookmarks[key]
            return super(bookmark_repo, self).lookup(key)

        def commit(self, *k, **kw):
            """Add a revision to the repository and
            move the bookmark"""
            wlock = self.wlock() # do both commit and bookmark with lock held
            try:
                node = super(bookmark_repo, self).commit(*k, **kw)
                if node is None:
                    return None
                parents = self.changelog.parents(node)
                if parents[1] == nullid:
                    parents = (parents[0],)
                marks = parse(self)
                update = False
                # advance bookmarks that pointed at a parent of the new node;
                # in track.current mode only the current bookmark moves
                for mark, n in marks.items():
                    if ui.configbool('bookmarks', 'track.current'):
                        if mark == current(self) and n in parents:
                            marks[mark] = node
                            update = True
                    else:
                        if n in parents:
                            marks[mark] = node
                            update = True
                if update:
                    write(self, marks)
                return node
            finally:
                wlock.release()

        def addchangegroup(self, source, srctype, url, emptyok=False):
            parents = self.dirstate.parents()

            result = super(bookmark_repo, self).addchangegroup(
                source, srctype, url, emptyok)
            if result > 1:
                # We have more heads than before
                return result
            node = self.changelog.tip()
            marks = parse(self)
            update = False
            for mark, n in marks.items():
                if n in parents:
                    marks[mark] = node
                    update = True
            if update:
                write(self, marks)
            return result

        def _findtags(self):
            """Merge bookmarks with normal tags"""
            (tags, tagtypes) = super(bookmark_repo, self)._findtags()
            tags.update(parse(self))
            return (tags, tagtypes)

    repo.__class__ = bookmark_repo
306 303
def uisetup(ui):
    # hook bookmark maintenance into repair.strip; in track.current
    # mode also wrap 'hg update' so the current bookmark follows
    extensions.wrapfunction(repair, "strip", strip)
    if ui.configbool('bookmarks', 'track.current'):
        extensions.wrapcommand(commands.table, 'update', updatecurbookmark)
311 308
def updatecurbookmark(orig, ui, repo, *args, **opts):
    '''Set the current bookmark

    If the user updates to a bookmark we update the .hg/bookmarks.current
    file.
    '''
    res = orig(ui, repo, *args, **opts)
    # opts.get avoids a KeyError if the wrapper is called without 'rev'
    rev = opts.get('rev')
    if not rev and args:
        rev = args[0]
    setcurrent(repo, rev)
    return res
324 321
# command table: registers 'hg bookmarks' with its options and synopsis
cmdtable = {
    "bookmarks":
        (bookmark,
         [('f', 'force', False, _('force')),
          ('r', 'rev', '', _('revision')),
          ('d', 'delete', False, _('delete a given bookmark')),
          ('m', 'rename', '', _('rename a given bookmark'))],
         _('hg bookmarks [-f] [-d] [-m NAME] [-r REV] [NAME]')),
}
@@ -1,2618 +1,2617 b''
1 1 # mq.py - patch queues for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2, incorporated herein by reference.
7 7
8 8 '''manage a stack of patches
9 9
10 10 This extension lets you work with a stack of patches in a Mercurial
11 11 repository. It manages two stacks of patches - all known patches, and applied
12 12 patches (subset of known patches).
13 13
14 14 Known patches are represented as patch files in the .hg/patches directory.
15 15 Applied patches are both patch files and changesets.
16 16
17 17 Common tasks (use "hg help command" for more details):
18 18
19 19 prepare repository to work with patches qinit
20 20 create new patch qnew
21 21 import existing patch qimport
22 22
23 23 print patch series qseries
24 24 print applied patches qapplied
25 25 print name of top applied patch qtop
26 26
27 27 add known patch to applied stack qpush
28 28 remove patch from applied stack qpop
29 29 refresh contents of top applied patch qrefresh
30 30 '''
31 31
32 32 from mercurial.i18n import _
33 33 from mercurial.node import bin, hex, short, nullid, nullrev
34 34 from mercurial.lock import release
35 35 from mercurial import commands, cmdutil, hg, patch, util
36 36 from mercurial import repair, extensions, url, error
37 37 import os, sys, re, errno
38 38
39 39 commands.norepo += " qclone"
40 40
41 41 # Patch names looks like unix-file names.
42 42 # They must be joinable with queue directory and result in the patch path.
43 43 normname = util.normpath
44 44
class statusentry(object):
    """One entry of the mq status file: an applied patch's rev:name pair."""
    def __init__(self, rev, name=None):
        if name:
            # two-argument form: fields given directly
            self.rev, self.name = rev, name
        else:
            # one-argument form: parse a "rev:name" status line
            head, sep, tail = rev.partition(':')
            if sep:
                self.rev, self.name = head, tail
            else:
                # malformed line: no separator present
                self.rev, self.name = None, None

    def __str__(self):
        return self.rev + ':' + self.name
58 58
class patchheader(object):
    """Parsed header of a patch file: message, comments, user, date.

    Understands both plain mail-style headers (Subject:/From:) and
    'hg export' style headers (# HG changeset patch / # User / # Date).
    """
    def __init__(self, pf):
        def eatdiff(lines):
            # drop trailing diff-leader lines from a comment block
            while lines:
                l = lines[-1]
                if (l.startswith("diff -") or
                    l.startswith("Index:") or
                    l.startswith("===========")):
                    del lines[-1]
                else:
                    break
        def eatempty(lines):
            # drop trailing blank lines
            while lines:
                l = lines[-1]
                if re.match('\s*$', l):
                    del lines[-1]
                else:
                    break

        message = []       # commit message lines
        comments = []      # every header line, verbatim
        user = None
        date = None
        format = None      # parser state: None/"hgpatch"/"tag"/"tagdone"
        diffstart = 0      # >1 once the actual diff body was reached
        subject = None

        for line in file(pf):
            line = line.rstrip()
            if line.startswith('diff --git'):
                diffstart = 2
                break
            if diffstart:
                if line.startswith('+++ '):
                    diffstart = 2
                    break
                if line.startswith("--- "):
                    diffstart = 1
                    continue
            elif format == "hgpatch":
                # parse values when importing the result of an hg export
                if line.startswith("# User "):
                    user = line[7:]
                elif line.startswith("# Date "):
                    date = line[7:]
                elif not line.startswith("# ") and line:
                    message.append(line)
                    format = None
            elif line == '# HG changeset patch':
                format = "hgpatch"
            elif (format != "tagdone" and (line.startswith("Subject: ") or
                                           line.startswith("subject: "))):
                subject = line[9:]
                format = "tag"
            elif (format != "tagdone" and (line.startswith("From: ") or
                                           line.startswith("from: "))):
                user = line[6:]
                format = "tag"
            elif format == "tag" and line == "":
                # when looking for tags (subject: from: etc) they
                # end once you find a blank line in the source
                format = "tagdone"
            elif message or line:
                message.append(line)
            comments.append(line)

        eatdiff(message)
        eatdiff(comments)
        eatempty(message)
        eatempty(comments)

        # make sure message isn't empty
        if format and format.startswith("tag") and subject:
            message.insert(0, "")
            message.insert(0, subject)

        self.message = message
        self.comments = comments
        self.user = user
        self.date = date
        self.haspatch = diffstart > 1

    def setuser(self, user):
        # rewrite an existing user header, or insert one in the right spot
        if not self.updateheader(['From: ', '# User '], user):
            try:
                patchheaderat = self.comments.index('# HG changeset patch')
                self.comments.insert(patchheaderat + 1,'# User ' + user)
            except ValueError:
                self.comments = ['From: ' + user, ''] + self.comments
        self.user = user

    def setdate(self, date):
        # only recorded if a '# Date' header already exists
        if self.updateheader(['# Date '], date):
            self.date = date

    def setmessage(self, message):
        # replace the commit message, keeping the other header fields
        if self.comments:
            self._delmsg()
        self.message = [message]
        self.comments += self.message

    def updateheader(self, prefixes, new):
        '''Update all references to a field in the patch header.
        Return whether the field is present.'''
        res = False
        for prefix in prefixes:
            for i in xrange(len(self.comments)):
                if self.comments[i].startswith(prefix):
                    self.comments[i] = prefix + new
                    res = True
                    break
        return res

    def __str__(self):
        # header text as written back to the patch file
        if not self.comments:
            return ''
        return '\n'.join(self.comments) + '\n\n'

    def _delmsg(self):
        '''Remove existing message, keeping the rest of the comments fields.
        If comments contains 'subject: ', message will prepend
        the field and a blank line.'''
        if self.message:
            subj = 'subject: ' + self.message[0].lower()
            for i in xrange(len(self.comments)):
                if subj == self.comments[i].lower():
                    del self.comments[i]
                    self.message = self.message[2:]
                    break
        ci = 0
        for mi in self.message:
            while mi != self.comments[ci]:
                ci += 1
            del self.comments[ci]
193 193
194 194 class queue(object):
    def __init__(self, ui, path, patchdir=None):
        # path: repository .hg directory; patches live in <path>/patches
        # unless an explicit patchdir is given
        self.basepath = path
        self.path = patchdir or os.path.join(path, "patches")
        self.opener = util.opener(self.path)
        self.ui = ui
        self.applied_dirty = 0
        self.series_dirty = 0
        self.series_path = "series"
        self.status_path = "status"
        self.guards_path = "guards"
        self.active_guards = None
        self.guards_dirty = False
        # lazily built by diffopts()
        self._diffopts = None
208 208
    @util.propertycache
    def applied(self):
        # statusentry objects for each line of the status file
        if os.path.exists(self.join(self.status_path)):
            lines = self.opener(self.status_path).read().splitlines()
            return [statusentry(l) for l in lines]
        return []
215 215
    @util.propertycache
    def full_series(self):
        # raw series file lines, comments and guard annotations included
        if os.path.exists(self.join(self.series_path)):
            return self.opener(self.series_path).read().splitlines()
        return []
221 221
    @util.propertycache
    def series(self):
        # parse_series() assigns self.series, overriding this property cache
        self.parse_series()
        return self.series
226 226
    @util.propertycache
    def series_guards(self):
        # parse_series() assigns self.series_guards, overriding this cache
        self.parse_series()
        return self.series_guards
231 231
232 232 def invalidate(self):
233 233 for a in 'applied full_series series series_guards'.split():
234 234 if a in self.__dict__:
235 235 delattr(self, a)
236 236 self.applied_dirty = 0
237 237 self.series_dirty = 0
238 238 self.guards_dirty = False
239 239 self.active_guards = None
240 240
    def diffopts(self):
        # build the diff options from ui config once, then reuse
        if self._diffopts is None:
            self._diffopts = patch.diffopts(self.ui)
        return self._diffopts
245 245
    def join(self, *p):
        # path components joined onto the patch directory
        return os.path.join(self.path, *p)
248 248
249 249 def find_series(self, patch):
250 250 pre = re.compile("(\s*)([^#]+)")
251 251 index = 0
252 252 for l in self.full_series:
253 253 m = pre.match(l)
254 254 if m:
255 255 s = m.group(2)
256 256 s = s.rstrip()
257 257 if s == patch:
258 258 return index
259 259 index += 1
260 260 return None
261 261
    # matches guard annotations (" #+guard" / " #-guard") on series lines
    guard_re = re.compile(r'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')
263 263
    def parse_series(self):
        """Split full_series into self.series (patch names) and
        self.series_guards (guard list per patch)."""
        self.series = []
        self.series_guards = []
        for l in self.full_series:
            h = l.find('#')
            if h == -1:
                patch = l
                comment = ''
            elif h == 0:
                # line is entirely a comment
                continue
            else:
                patch = l[:h]
                comment = l[h:]
            patch = patch.strip()
            if patch:
                if patch in self.series:
                    raise util.Abort(_('%s appears more than once in %s') %
                                     (patch, self.join(self.series_path)))
                self.series.append(patch)
                self.series_guards.append(self.guard_re.findall(comment))
284 284
285 285 def check_guard(self, guard):
286 286 if not guard:
287 287 return _('guard cannot be an empty string')
288 288 bad_chars = '# \t\r\n\f'
289 289 first = guard[0]
290 290 if first in '-+':
291 291 return (_('guard %r starts with invalid character: %r') %
292 292 (guard, first))
293 293 for c in bad_chars:
294 294 if c in guard:
295 295 return _('invalid character in guard %r: %r') % (guard, c)
296 296
    def set_active(self, guards):
        """Validate and install the active guard set; marks guards dirty."""
        for guard in guards:
            bad = self.check_guard(guard)
            if bad:
                raise util.Abort(bad)
        # deduplicate and keep a stable, sorted order
        guards = sorted(set(guards))
        self.ui.debug(_('active guards: %s\n') % ' '.join(guards))
        self.active_guards = guards
        self.guards_dirty = True
306 306
    def active(self):
        """Return the active guards, loading the guards file on first use."""
        if self.active_guards is None:
            self.active_guards = []
            try:
                guards = self.opener(self.guards_path).read().split()
            except IOError, err:
                # a missing guards file just means no active guards
                if err.errno != errno.ENOENT: raise
                guards = []
            for i, guard in enumerate(guards):
                bad = self.check_guard(guard)
                if bad:
                    # warn and skip invalid guards instead of aborting
                    self.ui.warn('%s:%d: %s\n' %
                                 (self.join(self.guards_path), i + 1, bad))
                else:
                    self.active_guards.append(guard)
        return self.active_guards
323 323
    def set_guards(self, idx, guards):
        """Replace the guard annotations on series entry idx."""
        for g in guards:
            if len(g) < 2:
                raise util.Abort(_('guard %r too short') % g)
            if g[0] not in '-+':
                raise util.Abort(_('guard %r starts with invalid char') % g)
            bad = self.check_guard(g[1:])
            if bad:
                raise util.Abort(bad)
        # strip the old guard annotations, then append the new ones
        drop = self.guard_re.sub('', self.full_series[idx])
        self.full_series[idx] = drop + ''.join([' #' + g for g in guards])
        self.parse_series()
        self.series_dirty = True
337 337
    def pushable(self, idx):
        """Return (pushable, why) for the patch at idx (index or name).

        A matching negative guard blocks a push; if positive guards
        exist, at least one must be in the active set.
        """
        if isinstance(idx, str):
            idx = self.series.index(idx)
        patchguards = self.series_guards[idx]
        if not patchguards:
            # unguarded patches are always pushable
            return True, None
        guards = self.active()
        exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
        if exactneg:
            return False, exactneg[0]
        pos = [g for g in patchguards if g[0] == '+']
        exactpos = [g for g in pos if g[1:] in guards]
        if pos:
            if exactpos:
                return True, exactpos[0]
            return False, pos
        return True, ''
355 355
    def explain_pushable(self, idx, all_patches=False):
        """Tell the user why the patch at idx is or is not pushable.

        With all_patches, messages go to stdout; otherwise they are
        warnings and only shown in verbose mode.
        """
        write = all_patches and self.ui.write or self.ui.warn
        if all_patches or self.ui.verbose:
            if isinstance(idx, str):
                idx = self.series.index(idx)
            pushable, why = self.pushable(idx)
            if all_patches and pushable:
                if why is None:
                    write(_('allowing %s - no guards in effect\n') %
                          self.series[idx])
                else:
                    if not why:
                        write(_('allowing %s - no matching negative guards\n') %
                              self.series[idx])
                    else:
                        write(_('allowing %s - guarded by %r\n') %
                              (self.series[idx], why))
            if not pushable:
                if why:
                    write(_('skipping %s - guarded by %r\n') %
                          (self.series[idx], why))
                else:
                    write(_('skipping %s - no matching guards\n') %
                          self.series[idx])
380 380
381 381 def save_dirty(self):
382 382 def write_list(items, path):
383 383 fp = self.opener(path, 'w')
384 384 for i in items:
385 385 fp.write("%s\n" % i)
386 386 fp.close()
387 387 if self.applied_dirty: write_list(map(str, self.applied), self.status_path)
388 388 if self.series_dirty: write_list(self.full_series, self.series_path)
389 389 if self.guards_dirty: write_list(self.active_guards, self.guards_path)
390 390
    def removeundo(self, repo):
        # drop the store's undo file so an mq operation cannot be
        # partially undone by a later 'hg rollback'
        undo = repo.sjoin('undo')
        if not os.path.exists(undo):
            return
        try:
            os.unlink(undo)
        except OSError, inst:
            self.ui.warn(_('error removing undo: %s\n') % str(inst))
399 399
400 400 def printdiff(self, repo, node1, node2=None, files=None,
401 401 fp=None, changes=None, opts={}):
402 402 m = cmdutil.match(repo, files, opts)
403 403 chunks = patch.diff(repo, node1, node2, m, changes, self.diffopts())
404 404 write = fp is None and repo.ui.write or fp.write
405 405 for chunk in chunks:
406 406 write(chunk)
407 407
    def mergeone(self, repo, mergeq, head, patch, rev):
        """Apply patch on top of head; on failure, strip the partial
        result and merge with rev instead. Returns (err, newhead)."""
        # first try just applying the patch
        (err, n) = self.apply(repo, [ patch ], update_status=False,
                              strict=True, merge=rev)

        if err == 0:
            return (err, n)

        if n is None:
            raise util.Abort(_("apply failed for patch %s") % patch)

        self.ui.warn(_("patch didn't work out, merging %s\n") % patch)

        # apply failed, strip away that rev and merge.
        hg.clean(repo, head)
        self.strip(repo, n, update=False, backup='strip')

        ctx = repo[rev]
        ret = hg.merge(repo, rev)
        if ret:
            raise util.Abort(_("update returned %d") % ret)
        n = repo.commit(ctx.description(), ctx.user(), force=True)
        if n is None:
            raise util.Abort(_("repo commit failed"))
        try:
            ph = patchheader(mergeq.join(patch))
        except:
            # NOTE(review): bare except also swallows KeyboardInterrupt
            raise util.Abort(_("unable to read %s") % patch)

        # rewrite the patch file as the diff of the merge result
        patchf = self.opener(patch, "w")
        comments = str(ph)
        if comments:
            patchf.write(comments)
        self.printdiff(repo, head, n, fp=patchf)
        patchf.close()
        self.removeundo(repo)
        return (0, n)
445 445
    def qparents(self, repo, rev=None):
        """Return the revision the patch queue is based on.

        With no rev: the first dirstate parent, or the last applied
        patch on a merge. With a merge rev: whichever of its parents
        is an applied patch (defaulting to the first parent).
        """
        if rev is None:
            (p1, p2) = repo.dirstate.parents()
            if p2 == nullid:
                return p1
            if len(self.applied) == 0:
                return None
            return bin(self.applied[-1].rev)
        pp = repo.changelog.parents(rev)
        if pp[1] != nullid:
            arevs = [ x.rev for x in self.applied ]
            p0 = hex(pp[0])
            p1 = hex(pp[1])
            if p0 in arevs:
                return pp[0]
            if p1 in arevs:
                return pp[1]
        return pp[0]
464 464
    def mergepatch(self, repo, mergeq, series):
        """Merge each patch of series from mergeq into this queue.

        Returns (err, head); stops at the first failure.
        """
        if len(self.applied) == 0:
            # each of the patches merged in will have two parents. This
            # can confuse the qrefresh, qdiff, and strip code because it
            # needs to know which parent is actually in the patch queue.
            # so, we insert a merge marker with only one parent. This way
            # the first patch in the queue is never a merge patch
            #
            pname = ".hg.patches.merge.marker"
            n = repo.commit('[mq]: merge marker', force=True)
            self.removeundo(repo)
            self.applied.append(statusentry(hex(n), pname))
            self.applied_dirty = 1

        head = self.qparents(repo)

        for patch in series:
            patch = mergeq.lookup(patch, strict=True)
            if not patch:
                self.ui.warn(_("patch %s does not exist\n") % patch)
                return (1, None)
            pushable, reason = self.pushable(patch)
            if not pushable:
                self.explain_pushable(patch, all_patches=True)
                continue
            info = mergeq.isapplied(patch)
            if not info:
                self.ui.warn(_("patch %s is not applied\n") % patch)
                return (1, None)
            rev = bin(info[1])
            (err, head) = self.mergeone(repo, mergeq, head, patch, rev)
            if head:
                self.applied.append(statusentry(hex(head), patch))
                self.applied_dirty = 1
            if err:
                return (err, head)
        self.save_dirty()
        return (0, head)
503 503
    def patch(self, repo, patchfile):
        '''Apply patchfile to the working directory.
        patchfile: name of patch file

        Returns (success, touched_files, fuzz).'''
        files = {}
        try:
            fuzz = patch.patch(patchfile, self.ui, strip=1, cwd=repo.root,
                               files=files, eolmode=None)
        except Exception, inst:
            self.ui.note(str(inst) + '\n')
            if not self.ui.verbose:
                self.ui.warn(_("patch failed, unable to continue (try -v)\n"))
            return (False, files, False)

        return (True, files, fuzz)
518 518
    def apply(self, repo, series, list=False, update_status=True,
              strict=False, patchdir=None, merge=None, all_files={}):
        """Apply a series of patches inside one repo transaction,
        holding both the working dir and store locks.

        Delegates the real work to _apply(); aborts the transaction and
        invalidates caches on any failure.
        """
        wlock = lock = tr = None
        try:
            wlock = repo.wlock()
            lock = repo.lock()
            tr = repo.transaction()
            try:
                ret = self._apply(repo, series, list, update_status,
                                  strict, patchdir, merge, all_files=all_files)
                tr.close()
                self.save_dirty()
                return ret
            except:
                # roll back the transaction, then drop stale caches
                try:
                    tr.abort()
                finally:
                    repo.invalidate()
                    repo.dirstate.invalidate()
                raise
        finally:
            del tr
            release(lock, wlock)
            self.removeundo(repo)
543 543
    def _apply(self, repo, series, list=False, update_status=True,
               strict=False, patchdir=None, merge=None, all_files={}):
        '''returns (error, hash)
        error = 1 for unable to read, 2 for patch failed, 3 for patch fuzz'''
        # TODO unify with commands.py
        if not patchdir:
            patchdir = self.path
        err = 0
        n = None
        for patchname in series:
            pushable, reason = self.pushable(patchname)
            if not pushable:
                self.explain_pushable(patchname, all_patches=True)
                continue
            self.ui.status(_("applying %s\n") % patchname)
            pf = os.path.join(patchdir, patchname)

            try:
                ph = patchheader(self.join(patchname))
            except:
                self.ui.warn(_("unable to read %s\n") % patchname)
                err = 1
                break

            message = ph.message
            if not message:
                message = _("imported patch %s\n") % patchname
            else:
                if list:
                    message.append(_("\nimported patch %s") % patchname)
                message = '\n'.join(message)

            if ph.haspatch:
                (patcherr, files, fuzz) = self.patch(repo, pf)
                all_files.update(files)
                patcherr = not patcherr
            else:
                self.ui.warn(_("patch %s is empty\n") % patchname)
                patcherr, files, fuzz = 0, [], 0

            if merge and files:
                # Mark as removed/merged and update dirstate parent info
                removed = []
                merged = []
                for f in files:
                    if os.path.exists(repo.wjoin(f)):
                        merged.append(f)
                    else:
                        removed.append(f)
                for f in removed:
                    repo.dirstate.remove(f)
                for f in merged:
                    repo.dirstate.merge(f)
                p1, p2 = repo.dirstate.parents()
                repo.dirstate.setparents(p1, merge)

            files = patch.updatedir(self.ui, repo, files)
            match = cmdutil.matchfiles(repo, files or [])
            n = repo.commit(message, ph.user, ph.date, match=match, force=True)

            if n is None:
                raise util.Abort(_("repo commit failed"))

            if update_status:
                self.applied.append(statusentry(hex(n), patchname))

            if patcherr:
                self.ui.warn(_("patch failed, rejects left in working dir\n"))
                err = 2
                break

            if fuzz and strict:
                self.ui.warn(_("fuzz found when applying patch, stopping\n"))
                err = 3
                break
        return (err, n)
620 620
    def _cleanup(self, patches, numrevs, keep=False):
        """Remove *patches* from mq bookkeeping.

        Unless *keep*, the patch files themselves are deleted (through the
        queue repository when the patch directory is versioned).  The first
        *numrevs* entries of the applied list are dropped (used by qfinish/
        qdelete --rev), and the patches are removed from the series.
        """
        if not keep:
            r = self.qrepo()
            if r:
                # patch dir is itself a repo: record the file removal there
                r.remove(patches, True)
            else:
                for p in patches:
                    os.unlink(self.join(p))

        if numrevs:
            del self.applied[:numrevs]
            self.applied_dirty = 1

        # delete highest indexes first so the remaining ones stay valid
        for i in sorted([self.find_series(p) for p in patches], reverse=True):
            del self.full_series[i]
        self.parse_series()
        self.series_dirty = 1
638 638
639 639 def _revpatches(self, repo, revs):
640 640 firstrev = repo[self.applied[0].rev].rev()
641 641 patches = []
642 642 for i, rev in enumerate(revs):
643 643
644 644 if rev < firstrev:
645 645 raise util.Abort(_('revision %d is not managed') % rev)
646 646
647 647 ctx = repo[rev]
648 648 base = bin(self.applied[i].rev)
649 649 if ctx.node() != base:
650 650 msg = _('cannot delete revision %d above applied patches')
651 651 raise util.Abort(msg % rev)
652 652
653 653 patch = self.applied[i].name
654 654 for fmt in ('[mq]: %s', 'imported patch %s'):
655 655 if ctx.description() == fmt % patch:
656 656 msg = _('patch %s finalized without changeset message\n')
657 657 repo.ui.status(msg % patch)
658 658 break
659 659
660 660 patches.append(patch)
661 661 return patches
662 662
663 663 def finish(self, repo, revs):
664 664 patches = self._revpatches(repo, sorted(revs))
665 665 self._cleanup(patches, len(patches))
666 666
667 667 def delete(self, repo, patches, opts):
668 668 if not patches and not opts.get('rev'):
669 669 raise util.Abort(_('qdelete requires at least one revision or '
670 670 'patch name'))
671 671
672 672 realpatches = []
673 673 for patch in patches:
674 674 patch = self.lookup(patch, strict=True)
675 675 info = self.isapplied(patch)
676 676 if info:
677 677 raise util.Abort(_("cannot delete applied patch %s") % patch)
678 678 if patch not in self.series:
679 679 raise util.Abort(_("patch %s not in series file") % patch)
680 680 realpatches.append(patch)
681 681
682 682 numrevs = 0
683 683 if opts.get('rev'):
684 684 if not self.applied:
685 685 raise util.Abort(_('no patches applied'))
686 686 revs = cmdutil.revrange(repo, opts['rev'])
687 687 if len(revs) > 1 and revs[0] > revs[1]:
688 688 revs.reverse()
689 689 revpatches = self._revpatches(repo, revs)
690 690 realpatches += revpatches
691 691 numrevs = len(revpatches)
692 692
693 693 self._cleanup(realpatches, numrevs, opts.get('keep'))
694 694
695 695 def check_toppatch(self, repo):
696 696 if len(self.applied) > 0:
697 697 top = bin(self.applied[-1].rev)
698 698 pp = repo.dirstate.parents()
699 699 if top not in pp:
700 700 raise util.Abort(_("working directory revision is not qtip"))
701 701 return top
702 702 return None
703 703 def check_localchanges(self, repo, force=False, refresh=True):
704 704 m, a, r, d = repo.status()[:4]
705 705 if m or a or r or d:
706 706 if not force:
707 707 if refresh:
708 708 raise util.Abort(_("local changes found, refresh first"))
709 709 else:
710 710 raise util.Abort(_("local changes found"))
711 711 return m, a, r, d
712 712
713 713 _reserved = ('series', 'status', 'guards')
714 714 def check_reserved_name(self, name):
715 715 if (name in self._reserved or name.startswith('.hg')
716 716 or name.startswith('.mq')):
717 717 raise util.Abort(_('"%s" cannot be used as the name of a patch')
718 718 % name)
719 719
    def new(self, repo, patchfn, *pats, **opts):
        """Create a new patch from the current working-directory changes.

        options:
        msg: a string or a no-argument function returning a string

        Writes the patch file, commits the changes as the new qtip, and
        records the patch in the series/status files.  On any failure the
        commit is rolled back and the half-written patch file is unlinked.
        """
        msg = opts.get('msg')
        force = opts.get('force')
        user = opts.get('user')
        date = opts.get('date')
        if date:
            date = util.parsedate(date)
        self.check_reserved_name(patchfn)
        if os.path.exists(self.join(patchfn)):
            raise util.Abort(_('patch "%s" already exists') % patchfn)
        if opts.get('include') or opts.get('exclude') or pats:
            match = cmdutil.match(repo, pats, opts)
            # detect missing files in pats
            def badfn(f, msg):
                raise util.Abort('%s: %s' % (f, msg))
            match.bad = badfn
            m, a, r, d = repo.status(match=match)[:4]
        else:
            # without an explicit file list, refuse to leave changes behind
            m, a, r, d = self.check_localchanges(repo, force)
            match = cmdutil.matchfiles(repo, m + a + r)
        commitfiles = m + a + r
        self.check_toppatch(repo)
        insert = self.full_series_end()
        wlock = repo.wlock()
        try:
            # if patch file write fails, abort early
            p = self.opener(patchfn, "w")
            try:
                # emit an HG changeset-patch header when a date (and
                # optionally a user) was supplied, else a mail-style From:
                if date:
                    p.write("# HG changeset patch\n")
                    if user:
                        p.write("# User " + user + "\n")
                    p.write("# Date %d %d\n\n" % date)
                elif user:
                    p.write("From: " + user + "\n\n")

                if hasattr(msg, '__call__'):
                    msg = msg()
                commitmsg = msg and msg or ("[mq]: %s" % patchfn)
                n = repo.commit(commitmsg, user, date, match=match, force=True)
                if n is None:
                    raise util.Abort(_("repo commit failed"))
                try:
                    self.full_series[insert:insert] = [patchfn]
                    self.applied.append(statusentry(hex(n), patchfn))
                    self.parse_series()
                    self.series_dirty = 1
                    self.applied_dirty = 1
                    if msg:
                        msg = msg + "\n\n"
                        p.write(msg)
                    if commitfiles:
                        diffopts = self.diffopts()
                        if opts.get('git'): diffopts.git = True
                        parent = self.qparents(repo, n)
                        chunks = patch.diff(repo, node1=parent, node2=n,
                                            match=match, opts=diffopts)
                        for chunk in chunks:
                            p.write(chunk)
                    p.close()
                    wlock.release()
                    wlock = None
                    r = self.qrepo()
                    if r: r.add([patchfn])
                except:
                    # undo the commit made above before re-raising
                    repo.rollback()
                    raise
            except Exception:
                # remove the (possibly partial) patch file on any failure
                patchpath = self.join(patchfn)
                try:
                    os.unlink(patchpath)
                except:
                    self.ui.warn(_('error unlinking %s\n') % patchpath)
                raise
            self.removeundo(repo)
        finally:
            release(wlock)
800 800
    def strip(self, repo, rev, update=True, backup="all", force=None):
        """Strip *rev* and its descendants from the repository.

        With *update*, first move the working directory to rev's qparent
        (refusing to discard local changes unless *force*).  *backup* is
        forwarded to repair.strip.
        """
        wlock = lock = None
        try:
            wlock = repo.wlock()
            lock = repo.lock()

            if update:
                self.check_localchanges(repo, force=force, refresh=False)
                urev = self.qparents(repo, rev)
                hg.clean(repo, urev)
                repo.dirstate.write()

            self.removeundo(repo)
            repair.strip(self.ui, repo, rev, backup)
            # strip may have unbundled a set of backed up revisions after
            # the actual strip
            self.removeundo(repo)
        finally:
            release(lock, wlock)
820 820
821 821 def isapplied(self, patch):
822 822 """returns (index, rev, patch)"""
823 823 for i, a in enumerate(self.applied):
824 824 if a.name == patch:
825 825 return (i, a.rev, a.name)
826 826 return None
827 827
    # if the exact patch name does not exist, we try a few
    # variations. If strict is passed, we try only #1
    #
    # 1) a number to indicate an offset in the series file
    # 2) a unique substring of the patch name was given
    # 3) patchname[-+]num to indicate an offset in the series file
    def lookup(self, patch, strict=False):
        """Resolve *patch* to a series entry name (see the variations
        listed above); abort when nothing matches."""
        patch = patch and str(patch)

        def partial_name(s):
            # exact name, unique substring match, or the symbolic names
            # qtip/qbase; None when ambiguous or unknown
            if s in self.series:
                return s
            matches = [x for x in self.series if s in x]
            if len(matches) > 1:
                self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
                for m in matches:
                    self.ui.warn(' %s\n' % m)
                return None
            if matches:
                return matches[0]
            if len(self.series) > 0 and len(self.applied) > 0:
                if s == 'qtip':
                    return self.series[self.series_end(True)-1]
                if s == 'qbase':
                    return self.series[0]
            return None

        if patch is None:
            return None
        if patch in self.series:
            return patch

        # a bare integer (not shadowed by a file of that name) is an
        # index into the series; negative indexes count from the end
        if not os.path.isfile(self.join(patch)):
            try:
                sno = int(patch)
            except(ValueError, OverflowError):
                pass
            else:
                if -len(self.series) <= sno < len(self.series):
                    return self.series[sno]

        if not strict:
            res = partial_name(patch)
            if res:
                return res
            # NAME-N: the patch N positions before NAME in the series
            minus = patch.rfind('-')
            if minus >= 0:
                res = partial_name(patch[:minus])
                if res:
                    i = self.series.index(res)
                    try:
                        off = int(patch[minus+1:] or 1)
                    except(ValueError, OverflowError):
                        pass
                    else:
                        if i - off >= 0:
                            return self.series[i - off]
            # NAME+N: the patch N positions after NAME in the series
            plus = patch.rfind('+')
            if plus >= 0:
                res = partial_name(patch[:plus])
                if res:
                    i = self.series.index(res)
                    try:
                        off = int(patch[plus+1:] or 1)
                    except(ValueError, OverflowError):
                        pass
                    else:
                        if i + off < len(self.series):
                            return self.series[i + off]
        raise util.Abort(_("patch %s not in series") % patch)
898 898
    def push(self, repo, patch=None, force=False, list=False,
             mergeq=None, all=False):
        """Apply the next patch, or patches up to *patch* (qpush).

        Returns 0 on success/no-op, 1 when nothing can be pushed, or the
        error code from apply.  On an exception during apply, the working
        directory is reverted and files created by patching are removed.
        """
        wlock = repo.wlock()
        try:
            if repo.dirstate.parents()[0] not in repo.heads():
                self.ui.status(_("(working directory not at a head)\n"))

            if not self.series:
                self.ui.warn(_('no patches in series\n'))
                return 0

            patch = self.lookup(patch)
            # Suppose our series file is: A B C and the current 'top'
            # patch is B. qpush C should be performed (moving forward)
            # qpush B is a NOP (no change) qpush A is an error (can't
            # go backwards with qpush)
            if patch:
                info = self.isapplied(patch)
                if info:
                    if info[0] < len(self.applied) - 1:
                        raise util.Abort(
                            _("cannot push to a previous patch: %s") % patch)
                    self.ui.warn(
                        _('qpush: %s is already at the top\n') % patch)
                    return
                pushable, reason = self.pushable(patch)
                if not pushable:
                    if reason:
                        reason = _('guarded by %r') % reason
                    else:
                        reason = _('no matching guards')
                    self.ui.warn(_("cannot push '%s' - %s\n") % (patch, reason))
                    return 1
            elif all:
                patch = self.series[-1]
                if self.isapplied(patch):
                    self.ui.warn(_('all patches are currently applied\n'))
                    return 0

            # Following the above example, starting at 'top' of B:
            # qpush should be performed (pushes C), but a subsequent
            # qpush without an argument is an error (nothing to
            # apply). This allows a loop of "...while hg qpush..." to
            # work as it detects an error when done
            start = self.series_end()
            if start == len(self.series):
                self.ui.warn(_('patch series already fully applied\n'))
                return 1
            if not force:
                self.check_localchanges(repo)

            self.applied_dirty = 1
            if start > 0:
                self.check_toppatch(repo)
            if not patch:
                patch = self.series[start]
                end = start + 1
            else:
                end = self.series.index(patch, start) + 1

            # the slice of patches to be applied in this push
            s = self.series[start:end]
            all_files = {}
            try:
                if mergeq:
                    ret = self.mergepatch(repo, mergeq, s)
                else:
                    ret = self.apply(repo, s, list, all_files=all_files)
            except:
                self.ui.warn(_('cleaning up working directory...'))
                node = repo.dirstate.parents()[0]
                hg.revert(repo, node, None)
                unknown = repo.status(unknown=True)[4]
                # only remove unknown files that we know we touched or
                # created while patching
                for f in unknown:
                    if f in all_files:
                        util.unlink(repo.wjoin(f))
                self.ui.warn(_('done\n'))
                raise

            top = self.applied[-1].name
            if ret[0] and ret[0] > 1:
                msg = _("errors during apply, please fix and refresh %s\n")
                self.ui.write(msg % top)
            else:
                self.ui.write(_("now at: %s\n") % top)
            return ret[0]

        finally:
            wlock.release()
989 989
    def pop(self, repo, patch=None, force=False, update=True, all=False):
        """Unapply patches down to (and including) *patch*, or just the
        top one; with *all*, unapply everything (qpop).

        When *update*, the working directory is moved back to the new
        qtip's parent by restoring files directly (a simplified hg.update,
        valid because local changes were checked above) before stripping
        the popped revisions.
        """
        def getfile(f, rev, flags):
            # restore file f in the working dir to its content at rev
            t = repo.file(f).read(rev)
            repo.wwrite(f, t, flags)

        wlock = repo.wlock()
        try:
            if patch:
                # index, rev, patch
                info = self.isapplied(patch)
                if not info:
                    patch = self.lookup(patch)
                info = self.isapplied(patch)
                if not info:
                    raise util.Abort(_("patch %s is not applied") % patch)

            if len(self.applied) == 0:
                # Allow qpop -a to work repeatedly,
                # but not qpop without an argument
                self.ui.warn(_("no patches applied\n"))
                return not all

            if all:
                start = 0
            elif patch:
                start = info[0] + 1
            else:
                start = len(self.applied) - 1

            if start >= len(self.applied):
                self.ui.warn(_("qpop: %s is already at the top\n") % patch)
                return

            if not update:
                # force a dirstate update anyway if a parent is an
                # applied (and therefore about-to-be-stripped) revision
                parents = repo.dirstate.parents()
                rr = [ bin(x.rev) for x in self.applied ]
                for p in parents:
                    if p in rr:
                        self.ui.warn(_("qpop: forcing dirstate update\n"))
                        update = True
            else:
                # skip the update when no popped revision is a wd parent
                parents = [p.hex() for p in repo[None].parents()]
                needupdate = False
                for entry in self.applied[start:]:
                    if entry.rev in parents:
                        needupdate = True
                        break
                update = needupdate

            if not force and update:
                self.check_localchanges(repo)

            self.applied_dirty = 1
            end = len(self.applied)
            rev = bin(self.applied[start].rev)
            if update:
                top = self.check_toppatch(repo)

            try:
                heads = repo.changelog.heads(rev)
            except error.LookupError:
                node = short(rev)
                raise util.Abort(_('trying to pop unknown node %s') % node)

            if heads != [bin(self.applied[-1].rev)]:
                raise util.Abort(_("popping would remove a revision not "
                                   "managed by this patch queue"))

            # we know there are no local changes, so we can make a simplified
            # form of hg.update.
            if update:
                qp = self.qparents(repo, rev)
                changes = repo.changelog.read(qp)
                mmap = repo.manifest.read(changes[0])
                m, a, r, d = repo.status(qp, top)[:4]
                if d:
                    raise util.Abort(_("deletions found between repo revs"))
                for f in m:
                    getfile(f, mmap[f], mmap.flags(f))
                for f in r:
                    getfile(f, mmap[f], mmap.flags(f))
                for f in m + r:
                    repo.dirstate.normal(f)
                for f in a:
                    # files added by the popped patches disappear entirely
                    try:
                        os.unlink(repo.wjoin(f))
                    except OSError, e:
                        if e.errno != errno.ENOENT:
                            raise
                    try: os.removedirs(os.path.dirname(repo.wjoin(f)))
                    except: pass
                    repo.dirstate.forget(f)
                repo.dirstate.setparents(qp, nullid)
            for patch in reversed(self.applied[start:end]):
                self.ui.status(_("popping %s\n") % patch.name)
            del self.applied[start:end]
            self.strip(repo, rev, update=False, backup='strip')
            if len(self.applied):
                self.ui.write(_("now at: %s\n") % self.applied[-1].name)
            else:
                self.ui.write(_("patch queue now empty\n"))
        finally:
            wlock.release()
1093 1093
1094 1094 def diff(self, repo, pats, opts):
1095 1095 top = self.check_toppatch(repo)
1096 1096 if not top:
1097 1097 self.ui.write(_("no patches applied\n"))
1098 1098 return
1099 1099 qp = self.qparents(repo, top)
1100 1100 self._diffopts = patch.diffopts(self.ui, opts)
1101 1101 self.printdiff(repo, qp, files=pats, opts=opts)
1102 1102
    def refresh(self, repo, pats=None, **opts):
        """Regenerate the top applied patch from the working directory
        (qrefresh).

        When the top patch is also the repository tip, the refresh is done
        in place: the diff is recomputed, the dirstate adjusted, the old
        tip stripped, and a replacement changeset committed.  Otherwise
        the patch file is rewritten and the patch is popped and re-pushed.
        """
        if len(self.applied) == 0:
            self.ui.write(_("no patches applied\n"))
            return 1
        msg = opts.get('msg', '').rstrip()
        newuser = opts.get('user')
        newdate = opts.get('date')
        if newdate:
            newdate = '%d %d' % util.parsedate(newdate)
        wlock = repo.wlock()
        try:
            self.check_toppatch(repo)
            (top, patchfn) = (self.applied[-1].rev, self.applied[-1].name)
            top = bin(top)
            if repo.changelog.heads(top) != [top]:
                raise util.Abort(_("cannot refresh a revision with children"))
            cparents = repo.changelog.parents(top)
            patchparent = self.qparents(repo, top)
            ph = patchheader(self.join(patchfn))

            patchf = self.opener(patchfn, 'r')

            # if the patch was a git patch, refresh it as a git patch
            for line in patchf:
                if line.startswith('diff --git'):
                    self.diffopts().git = True
                    break

            if msg:
                ph.setmessage(msg)
            if newuser:
                ph.setuser(newuser)
            if newdate:
                ph.setdate(newdate)

            # only commit new patch when write is complete
            patchf = self.opener(patchfn, 'w', atomictemp=True)

            patchf.seek(0)
            patchf.truncate()

            comments = str(ph)
            if comments:
                patchf.write(comments)

            if opts.get('git'):
                self.diffopts().git = True
            tip = repo.changelog.tip()
            if top == tip:
                # if the top of our patch queue is also the tip, there is an
                # optimization here. We update the dirstate in place and strip
                # off the tip commit. Then just commit the current directory
                # tree. We can also send repo.commit the list of files
                # changed to speed up the diff
                #
                # in short mode, we only diff the files included in the
                # patch already plus specified files
                #
                # this should really read:
                #   mm, dd, aa, aa2 = repo.status(tip, patchparent)[:4]
                # but we do it backwards to take advantage of manifest/chlog
                # caching against the next repo.status call
                #
                mm, aa, dd, aa2 = repo.status(patchparent, tip)[:4]
                changes = repo.changelog.read(tip)
                man = repo.manifest.read(changes[0])
                aaa = aa[:]
                matchfn = cmdutil.match(repo, pats, opts)
                if opts.get('short'):
                    # if amending a patch, we start with existing
                    # files plus specified files - unfiltered
                    match = cmdutil.matchfiles(repo, mm + aa + dd + matchfn.files())
                    # filter with inc/exl options
                    matchfn = cmdutil.match(repo, opts=opts)
                else:
                    match = cmdutil.matchall(repo)
                m, a, r, d = repo.status(match=match)[:4]

                # we might end up with files that were added between
                # tip and the dirstate parent, but then changed in the
                # local dirstate. in this case, we want them to only
                # show up in the added section
                for x in m:
                    if x not in aa:
                        mm.append(x)
                # we might end up with files added by the local dirstate that
                # were deleted by the patch. In this case, they should only
                # show up in the changed section.
                for x in a:
                    if x in dd:
                        del dd[dd.index(x)]
                        mm.append(x)
                    else:
                        aa.append(x)
                # make sure any files deleted in the local dirstate
                # are not in the add or change column of the patch
                forget = []
                for x in d + r:
                    if x in aa:
                        del aa[aa.index(x)]
                        forget.append(x)
                        continue
                    elif x in mm:
                        del mm[mm.index(x)]
                    dd.append(x)

                m = list(set(mm))
                r = list(set(dd))
                a = list(set(aa))
                c = [filter(matchfn, l) for l in (m, a, r)]
                match = cmdutil.matchfiles(repo, set(c[0] + c[1] + c[2]))
                chunks = patch.diff(repo, patchparent, match=match,
                                    changes=c, opts=self.diffopts())
                for chunk in chunks:
                    patchf.write(chunk)

                try:
                    if self.diffopts().git:
                        # preserve copy/rename information in the dirstate
                        copies = {}
                        for dst in a:
                            src = repo.dirstate.copied(dst)
                            # during qfold, the source file for copies may
                            # be removed. Treat this as a simple add.
                            if src is not None and src in repo.dirstate:
                                copies.setdefault(src, []).append(dst)
                            repo.dirstate.add(dst)
                        # remember the copies between patchparent and tip
                        for dst in aaa:
                            f = repo.file(dst)
                            src = f.renamed(man[dst])
                            if src:
                                copies.setdefault(src[0], []).extend(copies.get(dst, []))
                                if dst in a:
                                    copies[src[0]].append(dst)
                            # we can't copy a file created by the patch itself
                            if dst in copies:
                                del copies[dst]
                        for src, dsts in copies.iteritems():
                            for dst in dsts:
                                repo.dirstate.copy(src, dst)
                    else:
                        for dst in a:
                            repo.dirstate.add(dst)
                        # Drop useless copy information
                        for f in list(repo.dirstate.copies()):
                            repo.dirstate.copy(None, f)
                    for f in r:
                        repo.dirstate.remove(f)
                    # if the patch excludes a modified file, mark that
                    # file with mtime=0 so status can see it.
                    mm = []
                    for i in xrange(len(m)-1, -1, -1):
                        if not matchfn(m[i]):
                            mm.append(m[i])
                            del m[i]
                    for f in m:
                        repo.dirstate.normal(f)
                    for f in mm:
                        repo.dirstate.normallookup(f)
                    for f in forget:
                        repo.dirstate.forget(f)

                    if not msg:
                        if not ph.message:
                            message = "[mq]: %s\n" % patchfn
                        else:
                            message = "\n".join(ph.message)
                    else:
                        message = msg

                    user = ph.user or changes[1]

                    # assumes strip can roll itself back if interrupted
                    repo.dirstate.setparents(*cparents)
                    self.applied.pop()
                    self.applied_dirty = 1
                    self.strip(repo, top, update=False,
                               backup='strip')
                except:
                    repo.dirstate.invalidate()
                    raise

                try:
                    # might be nice to attempt to roll back strip after this
                    patchf.rename()
                    n = repo.commit(message, user, ph.date, match=match,
                                    force=True)
                    self.applied.append(statusentry(hex(n), patchfn))
                except:
                    ctx = repo[cparents[0]]
                    repo.dirstate.rebuild(ctx.node(), ctx.manifest())
                    self.save_dirty()
                    self.ui.warn(_('refresh interrupted while patch was popped! '
                                   '(revert --all, qpush to recover)\n'))
                    raise
            else:
                # slow path: rewrite the patch file, then pop and re-push it
                self.printdiff(repo, patchparent, fp=patchf)
                patchf.rename()
                added = repo.status()[1]
                for a in added:
                    f = repo.wjoin(a)
                    try:
                        os.unlink(f)
                    except OSError, e:
                        if e.errno != errno.ENOENT:
                            raise
                    try: os.removedirs(os.path.dirname(f))
                    except: pass
                    # forget the file copies in the dirstate
                    # push should readd the files later on
                    repo.dirstate.forget(a)
                self.pop(repo, force=True)
                self.push(repo, force=True)
        finally:
            wlock.release()
            self.removeundo(repo)
1319 1319
    def init(self, repo, create=False):
        """Create the patch directory; with *create*, also initialize a
        queue repository inside it and return it."""
        if not create and os.path.isdir(self.path):
            raise util.Abort(_("patch queue directory already exists"))
        try:
            os.mkdir(self.path)
        except OSError, inst:
            # an existing directory is tolerated when create=True
            if inst.errno != errno.EEXIST or not create:
                raise
        if create:
            return self.qrepo(create=True)
1330 1330
1331 1331 def unapplied(self, repo, patch=None):
1332 1332 if patch and patch not in self.series:
1333 1333 raise util.Abort(_("patch %s is not in series file") % patch)
1334 1334 if not patch:
1335 1335 start = self.series_end()
1336 1336 else:
1337 1337 start = self.series.index(patch) + 1
1338 1338 unapplied = []
1339 1339 for i in xrange(start, len(self.series)):
1340 1340 pushable, reason = self.pushable(i)
1341 1341 if pushable:
1342 1342 unapplied.append((i, self.series[i]))
1343 1343 self.explain_pushable(i)
1344 1344 return unapplied
1345 1345
    def qseries(self, repo, missing=None, start=0, length=None, status=None,
                summary=False):
        """Print the patch series.

        With *missing*, instead list files in the patch directory that are
        not part of the series.  *status* filters by state letter
        ('A'pplied / 'U'npushed / 'G'uarded) in non-verbose mode;
        *summary* appends the first line of each patch message.
        """
        def displayname(pfx, patchname):
            # render one output line: prefix, patch name, optional summary
            if summary:
                ph = patchheader(self.join(patchname))
                msg = ph.message
                msg = msg and ': ' + msg[0] or ': '
            else:
                msg = ''
            msg = "%s%s%s" % (pfx, patchname, msg)
            if self.ui.interactive():
                msg = util.ellipsis(msg, util.termwidth())
            self.ui.write(msg + '\n')

        applied = set([p.name for p in self.applied])
        if length is None:
            length = len(self.series) - start
        if not missing:
            if self.ui.verbose:
                idxwidth = len(str(start+length - 1))
            for i in xrange(start, start+length):
                patch = self.series[i]
                if patch in applied:
                    stat = 'A'
                elif self.pushable(i)[0]:
                    stat = 'U'
                else:
                    stat = 'G'
                pfx = ''
                if self.ui.verbose:
                    pfx = '%*d %s ' % (idxwidth, i, stat)
                elif status and status != stat:
                    # NOTE(review): the status filter only applies in
                    # non-verbose mode - confirm this is intentional
                    continue
                displayname(pfx, patch)
        else:
            # walk the patch directory for files unknown to the series
            msng_list = []
            for root, dirs, files in os.walk(self.path):
                d = root[len(self.path) + 1:]
                for f in files:
                    fl = os.path.join(d, f)
                    if (fl not in self.series and
                        fl not in (self.status_path, self.series_path,
                                   self.guards_path)
                        and not fl.startswith('.')):
                        msng_list.append(fl)
            for x in sorted(msng_list):
                pfx = self.ui.verbose and ('D ') or ''
                displayname(pfx, x)
1394 1394
1395 1395 def issaveline(self, l):
1396 1396 if l.name == '.hg.patches.save.line':
1397 1397 return True
1398 1398
1399 1399 def qrepo(self, create=False):
1400 1400 if create or os.path.isdir(self.join(".hg")):
1401 1401 return hg.repository(self.ui, path=self.path, create=create)
1402 1402
1403 1403 def restore(self, repo, rev, delete=None, qupdate=None):
1404 1404 c = repo.changelog.read(rev)
1405 1405 desc = c[4].strip()
1406 1406 lines = desc.splitlines()
1407 1407 i = 0
1408 1408 datastart = None
1409 1409 series = []
1410 1410 applied = []
1411 1411 qpp = None
1412 1412 for i, line in enumerate(lines):
1413 1413 if line == 'Patch Data:':
1414 1414 datastart = i + 1
1415 1415 elif line.startswith('Dirstate:'):
1416 1416 l = line.rstrip()
1417 1417 l = l[10:].split(' ')
1418 1418 qpp = [ bin(x) for x in l ]
1419 1419 elif datastart != None:
1420 1420 l = line.rstrip()
1421 1421 se = statusentry(l)
1422 1422 file_ = se.name
1423 1423 if se.rev:
1424 1424 applied.append(se)
1425 1425 else:
1426 1426 series.append(file_)
1427 1427 if datastart is None:
1428 1428 self.ui.warn(_("No saved patch data found\n"))
1429 1429 return 1
1430 1430 self.ui.warn(_("restoring status: %s\n") % lines[0])
1431 1431 self.full_series = series
1432 1432 self.applied = applied
1433 1433 self.parse_series()
1434 1434 self.series_dirty = 1
1435 1435 self.applied_dirty = 1
1436 1436 heads = repo.changelog.heads()
1437 1437 if delete:
1438 1438 if rev not in heads:
1439 1439 self.ui.warn(_("save entry has children, leaving it alone\n"))
1440 1440 else:
1441 1441 self.ui.warn(_("removing save entry %s\n") % short(rev))
1442 1442 pp = repo.dirstate.parents()
1443 1443 if rev in pp:
1444 1444 update = True
1445 1445 else:
1446 1446 update = False
1447 1447 self.strip(repo, rev, update=update, backup='strip')
1448 1448 if qpp:
1449 1449 self.ui.warn(_("saved queue repository parents: %s %s\n") %
1450 1450 (short(qpp[0]), short(qpp[1])))
1451 1451 if qupdate:
1452 1452 self.ui.status(_("queue directory updating\n"))
1453 1453 r = self.qrepo()
1454 1454 if not r:
1455 1455 self.ui.warn(_("Unable to load queue repository\n"))
1456 1456 return 1
1457 1457 hg.clean(r, qpp[0])
1458 1458
1459 1459 def save(self, repo, msg=None):
1460 1460 if len(self.applied) == 0:
1461 1461 self.ui.warn(_("save: no patches applied, exiting\n"))
1462 1462 return 1
1463 1463 if self.issaveline(self.applied[-1]):
1464 1464 self.ui.warn(_("status is already saved\n"))
1465 1465 return 1
1466 1466
1467 1467 ar = [ ':' + x for x in self.full_series ]
1468 1468 if not msg:
1469 1469 msg = _("hg patches saved state")
1470 1470 else:
1471 1471 msg = "hg patches: " + msg.rstrip('\r\n')
1472 1472 r = self.qrepo()
1473 1473 if r:
1474 1474 pp = r.dirstate.parents()
1475 1475 msg += "\nDirstate: %s %s" % (hex(pp[0]), hex(pp[1]))
1476 1476 msg += "\n\nPatch Data:\n"
1477 1477 text = msg + "\n".join([str(x) for x in self.applied]) + '\n' + (ar and
1478 1478 "\n".join(ar) + '\n' or "")
1479 1479 n = repo.commit(text, force=True)
1480 1480 if not n:
1481 1481 self.ui.warn(_("repo commit failed\n"))
1482 1482 return 1
1483 1483 self.applied.append(statusentry(hex(n),'.hg.patches.save.line'))
1484 1484 self.applied_dirty = 1
1485 1485 self.removeundo(repo)
1486 1486
1487 1487 def full_series_end(self):
1488 1488 if len(self.applied) > 0:
1489 1489 p = self.applied[-1].name
1490 1490 end = self.find_series(p)
1491 1491 if end is None:
1492 1492 return len(self.full_series)
1493 1493 return end + 1
1494 1494 return 0
1495 1495
1496 1496 def series_end(self, all_patches=False):
1497 1497 """If all_patches is False, return the index of the next pushable patch
1498 1498 in the series, or the series length. If all_patches is True, return the
1499 1499 index of the first patch past the last applied one.
1500 1500 """
1501 1501 end = 0
1502 1502 def next(start):
1503 1503 if all_patches:
1504 1504 return start
1505 1505 i = start
1506 1506 while i < len(self.series):
1507 1507 p, reason = self.pushable(i)
1508 1508 if p:
1509 1509 break
1510 1510 self.explain_pushable(i)
1511 1511 i += 1
1512 1512 return i
1513 1513 if len(self.applied) > 0:
1514 1514 p = self.applied[-1].name
1515 1515 try:
1516 1516 end = self.series.index(p)
1517 1517 except ValueError:
1518 1518 return 0
1519 1519 return next(end + 1)
1520 1520 return next(end)
1521 1521
1522 1522 def appliedname(self, index):
1523 1523 pname = self.applied[index].name
1524 1524 if not self.ui.verbose:
1525 1525 p = pname
1526 1526 else:
1527 1527 p = str(self.series.index(pname)) + " " + pname
1528 1528 return p
1529 1529
    def qimport(self, repo, files, patchname=None, rev=None, existing=None,
                force=None, git=False):
        """Import patches into the queue from files or revisions.

        ``files`` (patch files, '-' for stdin) and ``rev`` (revision specs)
        are mutually exclusive.  ``patchname`` forces the name of the single
        imported patch; ``existing`` registers a file already present in the
        patch directory; ``force`` allows overwriting existing patches;
        ``git`` exports revisions in git diff format.  Raises util.Abort on
        any invalid combination or name conflict.
        """
        def checkseries(patchname):
            # a name may appear only once in the series file
            if patchname in self.series:
                raise util.Abort(_('patch %s is already in the series file')
                                 % patchname)
        def checkfile(patchname):
            # refuse to clobber an existing patch file unless --force
            if not force and os.path.exists(self.join(patchname)):
                raise util.Abort(_('patch "%s" already exists')
                                 % patchname)

        if rev:
            if files:
                raise util.Abort(_('option "-r" not valid when importing '
                                   'files'))
            rev = cmdutil.revrange(repo, rev)
            rev.sort(reverse=True)
        if (len(files) > 1 or len(rev) > 1) and patchname:
            raise util.Abort(_('option "-n" not valid when importing multiple '
                               'patches'))
        i = 0
        added = []
        if rev:
            # If mq patches are applied, we can only import revisions
            # that form a linear path to qbase.
            # Otherwise, they should form a linear path to a head.
            heads = repo.changelog.heads(repo.changelog.node(rev[-1]))
            if len(heads) > 1:
                raise util.Abort(_('revision %d is the root of more than one '
                                   'branch') % rev[-1])
            if self.applied:
                base = hex(repo.changelog.node(rev[0]))
                if base in [n.rev for n in self.applied]:
                    raise util.Abort(_('revision %d is already managed')
                                     % rev[0])
                if heads != [bin(self.applied[-1].rev)]:
                    raise util.Abort(_('revision %d is not the parent of '
                                       'the queue') % rev[0])
                base = repo.changelog.rev(bin(self.applied[0].rev))
                lastparent = repo.changelog.parentrevs(base)[0]
            else:
                if heads != [repo.changelog.node(rev[0])]:
                    raise util.Abort(_('revision %d has unmanaged children')
                                     % rev[0])
                lastparent = None

            if git:
                self.diffopts().git = True

            # rev is reverse-sorted, so revisions are imported newest-first;
            # each must be the parent of the previously imported one
            for r in rev:
                p1, p2 = repo.changelog.parentrevs(r)
                n = repo.changelog.node(r)
                if p2 != nullrev:
                    raise util.Abort(_('cannot import merge revision %d') % r)
                if lastparent and lastparent != r:
                    raise util.Abort(_('revision %d is not the parent of %d')
                                     % (r, lastparent))
                lastparent = p1

                if not patchname:
                    patchname = normname('%d.diff' % r)
                self.check_reserved_name(patchname)
                checkseries(patchname)
                checkfile(patchname)
                self.full_series.insert(0, patchname)

                patchf = self.opener(patchname, "w")
                patch.export(repo, [n], fp=patchf, opts=self.diffopts())
                patchf.close()

                se = statusentry(hex(n), patchname)
                self.applied.insert(0, se)

                added.append(patchname)
                # -n only names the first imported patch
                patchname = None
            self.parse_series()
            self.applied_dirty = 1

        for filename in files:
            if existing:
                # register a patch already sitting in the patch directory
                if filename == '-':
                    raise util.Abort(_('-e is incompatible with import from -'))
                if not patchname:
                    patchname = normname(filename)
                self.check_reserved_name(patchname)
                if not os.path.isfile(self.join(patchname)):
                    raise util.Abort(_("patch %s does not exist") % patchname)
            else:
                # read patch text from stdin or a (possibly remote) file
                try:
                    if filename == '-':
                        if not patchname:
                            raise util.Abort(_('need --name to import a patch from -'))
                        text = sys.stdin.read()
                    else:
                        text = url.open(self.ui, filename).read()
                except (OSError, IOError):
                    raise util.Abort(_("unable to read %s") % filename)
                if not patchname:
                    patchname = normname(os.path.basename(filename))
                self.check_reserved_name(patchname)
                checkfile(patchname)
                patchf = self.opener(patchname, "w")
                patchf.write(text)
            if not force:
                checkseries(patchname)
            if patchname not in self.series:
                # insert after the last applied patch, preserving import order
                index = self.full_series_end() + i
                self.full_series[index:index] = [patchname]
            self.parse_series()
            self.ui.warn(_("adding %s to series file\n") % patchname)
            i += 1
            added.append(patchname)
            patchname = None
        self.series_dirty = 1
        # track new patch files in the versioned patch repo, if any
        qrepo = self.qrepo()
        if qrepo:
            qrepo.add(added)
1647 1647
def delete(ui, repo, *patches, **opts):
    """remove patches from queue

    The patches must not be applied, and at least one patch is required. With
    -k/--keep, the patch files are preserved in the patch directory.

    To stop managing a patch and move it into permanent history,
    use the qfinish command."""
    mq = repo.mq
    mq.delete(repo, patches, opts)
    mq.save_dirty()
    return 0
1660 1660
def applied(ui, repo, patch=None, **opts):
    """print the patches already applied"""
    q = repo.mq
    if not patch:
        # no patch named: show everything up to the last applied patch
        end = q.series_end(True)
    else:
        if patch not in q.series:
            raise util.Abort(_("patch %s is not in series file") % patch)
        end = q.series.index(patch) + 1
    return q.qseries(repo, length=end, status='A', summary=opts.get('summary'))
1671 1671
def unapplied(ui, repo, patch=None, **opts):
    """print the patches not yet applied"""
    q = repo.mq
    if not patch:
        # no patch named: start just past the last applied patch
        start = q.series_end(True)
    else:
        if patch not in q.series:
            raise util.Abort(_("patch %s is not in series file") % patch)
        start = q.series.index(patch) + 1
    q.qseries(repo, start=start, status='U', summary=opts.get('summary'))
1682 1682
def qimport(ui, repo, *filename, **opts):
    """import a patch

    The patch is inserted into the series after the last applied patch. If no
    patches have been applied, qimport prepends the patch to the series.

    The patch will have the same name as its source file unless you give it a
    new one with -n/--name.

    You can register an existing patch inside the patch directory with the
    -e/--existing flag.

    With -f/--force, an existing patch of the same name will be overwritten.

    An existing changeset may be placed under mq control with -r/--rev (e.g.
    qimport --rev tip -n patch will place tip under mq control). With
    -g/--git, patches imported with --rev will use the git diff format. See
    the diffs help topic for information on why this is important for
    preserving rename/copy information and permission changes.

    To import a patch from standard input, pass - as the patch file. When
    importing from standard input, a patch name must be specified using the
    --name flag.
    """
    mq = repo.mq
    mq.qimport(repo, filename, patchname=opts['name'], rev=opts['rev'],
               existing=opts['existing'], force=opts['force'],
               git=opts['git'])
    mq.save_dirty()

    # -P/--push: immediately push the imported patch (file imports only)
    if opts.get('push') and not opts.get('rev'):
        return mq.push(repo, None)
    return 0
1716 1716
def init(ui, repo, **opts):
    """init a new queue repository

    The queue repository is unversioned by default. If -c/--create-repo is
    specified, qinit will create a separate nested repository for patches
    (qinit -c may also be run later to convert an unversioned patch repository
    into a versioned one). You can use qcommit to commit changes to this queue
    repository.
    """
    mq = repo.mq
    qrepo = mq.init(repo, create=opts['create_repo'])
    mq.save_dirty()
    if qrepo:
        # seed the versioned patch repository with a sensible .hgignore
        # and an empty series file, then schedule both for commit
        if not os.path.exists(qrepo.wjoin('.hgignore')):
            ignore = qrepo.wopener('.hgignore', 'w')
            for line in ('^\\.hg\n', '^\\.mq\n', 'syntax: glob\n',
                         'status\n', 'guards\n'):
                ignore.write(line)
            ignore.close()
        if not os.path.exists(qrepo.wjoin('series')):
            qrepo.wopener('series', 'w').close()
        qrepo.add(['.hgignore', 'series'])
        commands.add(ui, qrepo)
    return 0
1743 1743
def clone(ui, source, dest=None, **opts):
    '''clone main and patch repository at same time

    If source is local, destination will have no patches applied. If source is
    remote, this command can not check if patches are applied in source, so
    cannot guarantee that patches are not applied in destination. If you clone
    remote repository, be sure before that it has no patches applied.

    Source patch repository is looked for in <src>/.hg/patches by default. Use
    -p <url> to change.

    The patch directory must be a nested Mercurial repository, as would be
    created by qinit -c.
    '''
    def patchdir(repo):
        # default location of the patch repository inside a clone
        url = repo.url()
        if url.endswith('/'):
            url = url[:-1]
        return url + '/.hg/patches'
    if dest is None:
        dest = hg.defaultdest(source)
    sr = hg.repository(cmdutil.remoteui(ui, opts), ui.expandpath(source))
    if opts['patches']:
        patchespath = ui.expandpath(opts['patches'])
    else:
        patchespath = patchdir(sr)
    # fail early if there is no versioned patch repository to clone
    try:
        hg.repository(ui, patchespath)
    except error.RepoError:
        raise util.Abort(_('versioned patch repository not found'
                           ' (see qinit -c)'))
    qbase, destrev = None, None
    if sr.local():
        if sr.mq.applied:
            qbase = bin(sr.mq.applied[0].rev)
            if not hg.islocal(dest):
                # remote destination: limit the clone to revisions not
                # introduced by applied patches (heads outside qbase's
                # descendants, plus qbase's parent)
                heads = set(sr.heads())
                destrev = list(heads.difference(sr.heads(qbase)))
                destrev.append(sr.changelog.parents(qbase)[0])
    elif sr.capable('lookup'):
        # remote source: best effort — ask it where qbase is
        try:
            qbase = sr.lookup('qbase')
        except error.RepoError:
            pass
    ui.note(_('cloning main repository\n'))
    sr, dr = hg.clone(ui, sr.url(), dest,
                      pull=opts['pull'],
                      rev=destrev,
                      update=False,
                      stream=opts['uncompressed'])
    ui.note(_('cloning patch repository\n'))
    hg.clone(ui, opts['patches'] or patchdir(sr), patchdir(dr),
             pull=opts['pull'], update=not opts['noupdate'],
             stream=opts['uncompressed'])
    if dr.local():
        if qbase:
            # local destination: remove patch-introduced changesets so the
            # clone starts with no patches applied
            ui.note(_('stripping applied patches from destination '
                      'repository\n'))
            dr.mq.strip(dr, qbase, update=False, backup=None)
        if not opts['noupdate']:
            ui.note(_('updating destination repository\n'))
            hg.update(dr, dr.changelog.tip())
1806 1806
def commit(ui, repo, *pats, **opts):
    """commit changes in the queue repository"""
    q = repo.mq
    r = q.qrepo()
    if not r:
        # wrap the message in _() for translation, consistent with every
        # other abort message in this extension
        raise util.Abort(_('no queue repository'))
    commands.commit(r.ui, r, *pats, **opts)
1813 1813
def series(ui, repo, **opts):
    """print the entire series file"""
    mq = repo.mq
    mq.qseries(repo, summary=opts['summary'], missing=opts['missing'])
    return 0
1818 1818
def top(ui, repo, **opts):
    """print the name of the current patch"""
    q = repo.mq
    if q.applied:
        pos = q.series_end(True)
    else:
        pos = 0
    if not pos:
        ui.write(_("no patches applied\n"))
        return 1
    return q.qseries(repo, start=pos-1, length=1, status='A',
                     summary=opts.get('summary'))
1829 1829
def next(ui, repo, **opts):
    """print the name of the next patch"""
    q = repo.mq
    pos = q.series_end()
    if pos == len(q.series):
        ui.write(_("all patches applied\n"))
        return 1
    return q.qseries(repo, start=pos, length=1, summary=opts.get('summary'))
1838 1838
def prev(ui, repo, **opts):
    """print the name of the previous patch"""
    q = repo.mq
    napplied = len(q.applied)
    if napplied == 1:
        ui.write(_("only one patch applied\n"))
        return 1
    if not napplied:
        ui.write(_("no patches applied\n"))
        return 1
    return q.qseries(repo, start=napplied-2, length=1, status='A',
                     summary=opts.get('summary'))
1851 1851
def setupheaderopts(ui, opts):
    """Default --user/--date in *opts* from --currentuser/--currentdate.

    Evaluates ui.username() / util.makedate() lazily, only when actually
    needed.  The previous code passed them as eager arguments, so a missing
    username configuration aborted even when --user was given explicitly or
    --currentuser was not requested at all.
    """
    if not opts['user'] and opts['currentuser']:
        opts['user'] = ui.username()
    if not opts['date'] and opts['currentdate']:
        opts['date'] = "%d %d" % util.makedate()
1858 1858
def new(ui, repo, patch, *args, **opts):
    """create a new patch

    qnew creates a new patch on top of the currently-applied patch (if any).
    It will refuse to run if there are any outstanding changes unless
    -f/--force is specified, in which case the patch will be initialized with
    them. You may also use -I/--include, -X/--exclude, and/or a list of files
    after the patch name to add only changes to matching files to the new
    patch, leaving the rest as uncommitted modifications.

    -u/--user and -d/--date can be used to set the (given) user and date,
    respectively. -U/--currentuser and -D/--currentdate set user to current
    user and date to current date.

    -e/--edit, -m/--message or -l/--logfile set the patch header as well as
    the commit message. If none is specified, the header is empty and the
    commit message is '[mq]: PATCH'.

    Use the -g/--git option to keep the patch in the git extended diff format.
    Read the diffs help topic for more information on why this is important
    for preserving permission changes and copy/rename information.
    """
    msg = cmdutil.logmessage(opts)
    q = repo.mq
    if opts.get('edit'):
        # defer the editor invocation: q.new calls this only once the
        # patch is actually about to be created
        opts['msg'] = lambda: ui.edit(msg, ui.username())
    else:
        opts['msg'] = msg
    setupheaderopts(ui, opts)
    q.new(repo, patch, *args, **opts)
    q.save_dirty()
    return 0
1893 1893
def refresh(ui, repo, *pats, **opts):
    """update the current patch

    If any file patterns are provided, the refreshed patch will contain only
    the modifications that match those patterns; the remaining modifications
    will remain in the working directory.

    If -s/--short is specified, files currently included in the patch will be
    refreshed just like matched files and remain in the patch.

    hg add/remove/copy/rename work as usual, though you might want to use
    git-style patches (-g/--git or [diff] git=1) to track copies and renames.
    See the diffs help topic for more information on the git diff format.
    """
    q = repo.mq
    message = cmdutil.logmessage(opts)
    if opts['edit']:
        if not q.applied:
            ui.write(_("no patches applied\n"))
            return 1
        if message:
            raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
        # seed the editor with the current top patch's header
        toppatch = q.applied[-1].name
        ph = patchheader(q.join(toppatch))
        message = ui.edit('\n'.join(ph.message), ph.user or ui.username())
    setupheaderopts(ui, opts)
    ret = q.refresh(repo, pats, msg=message, **opts)
    q.save_dirty()
    return ret
1923 1923
def diff(ui, repo, *pats, **opts):
    """diff of the current patch and subsequent modifications

    Shows a diff which includes the current patch as well as any changes which
    have been made in the working directory since the last refresh (thus
    showing what the current patch would become after a qrefresh).

    Use 'hg diff' if you only want to see the changes made since the last
    qrefresh, or 'hg export qtip' if you want to see changes made by the
    current patch without including changes made since the qrefresh.
    """
    mq = repo.mq
    mq.diff(repo, pats, opts)
    return 0
1937 1937
def fold(ui, repo, *files, **opts):
    """fold the named patches into the current patch

    Patches must not yet be applied. Each patch will be successively applied
    to the current patch in the order given. If all the patches apply
    successfully, the current patch will be refreshed with the new cumulative
    patch, and the folded patches will be deleted. With -k/--keep, the folded
    patch files will not be removed afterwards.

    The header for each folded patch will be concatenated with the current
    patch header, separated by a line of '* * *'.
    """

    q = repo.mq

    if not files:
        raise util.Abort(_('qfold requires at least one patch name'))
    if not q.check_toppatch(repo):
        raise util.Abort(_('No patches applied'))
    q.check_localchanges(repo)

    message = cmdutil.logmessage(opts)
    if opts['edit']:
        if message:
            raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))

    parent = q.lookup('qtip')
    patches = []
    messages = []
    for f in files:
        p = q.lookup(f)
        if p in patches or p == parent:
            # bug fix: previously this only warned but still appended the
            # duplicate, causing it to be folded twice (and the second
            # application to fail) — actually skip it as the message says
            ui.warn(_('Skipping already folded patch %s') % p)
            continue
        if q.isapplied(p):
            raise util.Abort(_('qfold cannot fold already applied patch %s') % p)
        patches.append(p)

    for p in patches:
        if not message:
            # collect each folded patch's header for the combined message
            ph = patchheader(q.join(p))
            if ph.message:
                messages.append(ph.message)
        pf = q.join(p)
        (patchsuccess, files, fuzz) = q.patch(repo, pf)
        if not patchsuccess:
            raise util.Abort(_('Error folding patch %s') % p)
        patch.updatedir(ui, repo, files)

    if not message:
        # build the cumulative message: current header, then each folded
        # header separated by '* * *'
        ph = patchheader(q.join(parent))
        message, user = ph.message, ph.user
        for msg in messages:
            message.append('* * *')
            message.extend(msg)
        message = '\n'.join(message)

    if opts['edit']:
        message = ui.edit(message, user or ui.username())

    q.refresh(repo, msg=message)
    q.delete(repo, patches, opts)
    q.save_dirty()
2000 2000
def goto(ui, repo, patch, **opts):
    '''push or pop patches until named patch is at top of stack'''
    q = repo.mq
    patch = q.lookup(patch)
    force = opts['force']
    # pop if the target is already applied, otherwise push up to it
    if q.isapplied(patch):
        ret = q.pop(repo, patch, force=force)
    else:
        ret = q.push(repo, patch, force=force)
    q.save_dirty()
    return ret
2011 2011
def guard(ui, repo, *args, **opts):
    '''set or print guards for a patch

    Guards control whether a patch can be pushed. A patch with no guards is
    always pushed. A patch with a positive guard ("+foo") is pushed only if
    the qselect command has activated it. A patch with a negative guard
    ("-foo") is never pushed if the qselect command has activated it.

    With no arguments, print the currently active guards. With arguments, set
    guards for the named patch.
    NOTE: Specifying negative guards now requires '--'.

    To set guards on another patch:
    hg qguard -- other.patch +2.6.17 -stable
    '''
    def status(idx):
        # print "<patch>: <guards>" for the patch at series index idx
        guards = q.series_guards[idx] or ['unguarded']
        ui.write('%s: %s\n' % (q.series[idx], ' '.join(guards)))
    q = repo.mq
    patch = None
    args = list(args)
    if opts['list']:
        if args or opts['none']:
            raise util.Abort(_('cannot mix -l/--list with options or arguments'))
        for i in xrange(len(q.series)):
            status(i)
        return
    # No arguments, or first argument looks like a guard ('+'/'-' prefix):
    # the patch being modified is the topmost applied one.
    if not args or args[0][0:1] in '-+':
        if not q.applied:
            raise util.Abort(_('no patches applied'))
        patch = q.applied[-1].name
    # Otherwise the first argument names the patch.
    if patch is None and args[0][0:1] not in '-+':
        patch = args.pop(0)
    if patch is None:
        raise util.Abort(_('no patch to work with'))
    if args or opts['none']:
        # remaining arguments are guards to set (--none clears them)
        idx = q.find_series(patch)
        if idx is None:
            raise util.Abort(_('no patch named %s') % patch)
        q.set_guards(idx, args)
        q.save_dirty()
    else:
        # no guards given: just print the patch's current guards
        status(q.series.index(q.lookup(patch)))
2055 2055
def header(ui, repo, patch=None):
    """print the header of the topmost or specified patch"""
    q = repo.mq

    if not patch:
        # default to the topmost applied patch
        if not q.applied:
            ui.write('no patches applied\n')
            return 1
        patch = q.lookup('qtip')
    else:
        patch = q.lookup(patch)
    ph = patchheader(repo.mq.join(patch))

    ui.write('\n'.join(ph.message) + '\n')
2070 2070
def lastsavename(path):
    """Return (fullpath, index) of the highest-numbered save file
    matching '<path>.<N>' in path's directory, or (None, None)."""
    directory, base = os.path.split(path)
    pattern = re.compile("%s.([0-9]+)" % base)
    best_index = None
    best_name = None
    for entry in os.listdir(directory):
        m = pattern.match(entry)
        if not m:
            continue
        idx = int(m.group(1))
        if best_index is None or idx > best_index:
            best_index = idx
            best_name = entry
    if best_name:
        return (os.path.join(directory, best_name), best_index)
    return (None, None)
2087 2087
def savename(path):
    """Return the next unused save name, '<path>.<N+1>' where N is the
    highest existing save index (0 if none exist)."""
    last, index = lastsavename(path)
    if last is None:
        index = 0
    return path + ".%d" % (index + 1)
2094 2094
def push(ui, repo, patch=None, **opts):
    """push the next patch onto the stack

    When -f/--force is applied, all local changes in patched files will be
    lost.
    """
    q = repo.mq
    mergeq = None

    if opts['merge']:
        # merge against a saved queue, either named or the latest save
        if opts['name']:
            newpath = repo.join(opts['name'])
        else:
            newpath, i = lastsavename(q.path)
        if not newpath:
            ui.warn(_("no saved queues found, please use -n\n"))
            return 1
        mergeq = queue(ui, repo.join(""), newpath)
        ui.warn(_("merging with queue at: %s\n") % mergeq.path)
    return q.push(repo, patch, force=opts['force'], list=opts['list'],
                  mergeq=mergeq, all=opts.get('all'))
2117 2117
def pop(ui, repo, patch=None, **opts):
    """pop the current patch off the stack

    By default, pops off the top of the patch stack. If given a patch name,
    keeps popping off patches until the named patch is at the top of the
    stack.
    """
    if opts['name']:
        # operate on a named queue; skip working-directory updates then
        q = queue(ui, repo.join(""), repo.join(opts['name']))
        ui.warn(_('using patch queue: %s\n') % q.path)
        localupdate = False
    else:
        q = repo.mq
        localupdate = True
    ret = q.pop(repo, patch, force=opts['force'], update=localupdate,
                all=opts['all'])
    q.save_dirty()
    return ret
2136 2136
def rename(ui, repo, patch, name=None, **opts):
    """rename a patch

    With one argument, renames the current patch to PATCH1.
    With two arguments, renames PATCH1 to PATCH2."""

    q = repo.mq

    # with a single argument it is the destination; the source defaults
    # to the topmost applied patch
    if not name:
        name = patch
        patch = None

    if patch:
        patch = q.lookup(patch)
    else:
        if not q.applied:
            ui.write(_('no patches applied\n'))
            return
        patch = q.lookup('qtip')
    absdest = q.join(name)
    if os.path.isdir(absdest):
        # renaming into a directory keeps the original basename
        name = normname(os.path.join(name, os.path.basename(patch)))
        absdest = q.join(name)
    if os.path.exists(absdest):
        raise util.Abort(_('%s already exists') % absdest)

    if name in q.series:
        raise util.Abort(_('A patch named %s already exists in the series file') % name)

    if ui.verbose:
        ui.write('renaming %s to %s\n' % (patch, name))
    # rewrite the series entry, preserving any '#guard' annotations
    i = q.find_series(patch)
    guards = q.guard_re.findall(q.full_series[i])
    q.full_series[i] = name + ''.join([' #' + g for g in guards])
    q.parse_series()
    q.series_dirty = 1

    # if the patch is applied, update its status-file entry too
    info = q.isapplied(patch)
    if info:
        q.applied[info[0]] = statusentry(info[1], name)
        q.applied_dirty = 1

    util.rename(q.join(patch), absdest)
    r = q.qrepo()
    if r:
        # mirror the rename in the versioned patch repository, if any
        wlock = r.wlock()
        try:
            if r.dirstate[patch] == 'a':
                # source only added, never committed: swap the adds
                r.dirstate.forget(patch)
                r.dirstate.add(name)
            else:
                if r.dirstate[name] == 'r':
                    r.undelete([name])
                r.copy(patch, name)
                r.remove([patch], False)
        finally:
            wlock.release()

    q.save_dirty()
2196 2196
def restore(ui, repo, rev, **opts):
    """restore the queue state saved by a revision"""
    q = repo.mq
    q.restore(repo, repo.lookup(rev), delete=opts['delete'],
              qupdate=opts['update'])
    q.save_dirty()
    return 0
2205 2205
def save(ui, repo, **opts):
    """save current queue state"""
    q = repo.mq
    message = cmdutil.logmessage(opts)
    ret = q.save(repo, msg=message)
    if ret:
        return ret
    q.save_dirty()
    if opts['copy']:
        path = q.path
        if opts['name']:
            newpath = os.path.join(q.basepath, opts['name'])
            if os.path.exists(newpath):
                if not os.path.isdir(newpath):
                    raise util.Abort(_('destination %s exists and is not '
                                       'a directory') % newpath)
                if not opts['force']:
                    raise util.Abort(_('destination %s exists, '
                                       'use -f to force') % newpath)
        else:
            newpath = savename(path)
        ui.warn(_("copy %s to %s\n") % (path, newpath))
        util.copyfiles(path, newpath)
    if opts['empty']:
        try:
            os.unlink(q.join(q.status_path))
        except OSError:
            # best-effort removal (the status file may not exist); the
            # previous bare except also swallowed KeyboardInterrupt etc.
            pass
    return 0
2235 2235
def strip(ui, repo, rev, **opts):
    """strip a revision and all its descendants from the repository

    If one of the working directory's parent revisions is stripped, the
    working directory will be updated to the parent of the stripped revision.
    """
    if opts['backup']:
        backup = 'strip'
    elif opts['nobackup']:
        backup = 'none'
    else:
        backup = 'all'

    rev = repo.lookup(rev)
    p1, p2 = repo.dirstate.parents()
    cl = repo.changelog
    # only update the working directory when the stripped revision is an
    # ancestor of one of its parents
    if p1 == nullid:
        update = False
    elif p2 == nullid and rev != cl.ancestor(p1, rev):
        update = False
    elif rev not in (cl.ancestor(p1, rev), cl.ancestor(p2, rev)):
        update = False
    else:
        update = True

    repo.mq.strip(repo, rev, backup=backup, update=update, force=opts['force'])
    return 0
2261 2261
def select(ui, repo, *args, **opts):
    '''set or print guarded patches to push

    Use the qguard command to set or print guards on patch, then use qselect
    to tell mq which guards to use. A patch will be pushed if it has no guards
    or any positive guards match the currently selected guard, but will not be
    pushed if any negative guards match the current guard. For example:

    qguard foo.patch -stable (negative guard)
    qguard bar.patch +stable (positive guard)
    qselect stable

    This activates the "stable" guard. mq will skip foo.patch (because it has
    a negative match) but push bar.patch (because it has a positive match).

    With no arguments, prints the currently active guards. With one argument,
    sets the active guard.

    Use -n/--none to deactivate guards (no other arguments needed). When no
    guards are active, patches with positive guards are skipped and patches
    with negative guards are pushed.

    qselect can change the guards on applied patches. It does not pop guarded
    patches by default. Use --pop to pop back to the last applied patch that
    is not guarded. Use --reapply (which implies --pop) to push back to the
    current patch afterwards, but skip guarded patches.

    Use -s/--series to print a list of all guards in the series file (no other
    arguments needed). Use -v for more information.
    '''

    q = repo.mq
    guards = q.active()
    if args or opts['none']:
        # changing the selection: remember the old unapplied/guarded counts
        # so we can report how the new selection changed things
        old_unapplied = q.unapplied(repo)
        old_guarded = [i for i in xrange(len(q.applied)) if
                       not q.pushable(i)[0]]
        q.set_active(args)
        q.save_dirty()
        if not args:
            ui.status(_('guards deactivated\n'))
        if not opts['pop'] and not opts['reapply']:
            unapplied = q.unapplied(repo)
            guarded = [i for i in xrange(len(q.applied))
                       if not q.pushable(i)[0]]
            if len(unapplied) != len(old_unapplied):
                ui.status(_('number of unguarded, unapplied patches has '
                            'changed from %d to %d\n') %
                          (len(old_unapplied), len(unapplied)))
            if len(guarded) != len(old_guarded):
                ui.status(_('number of guarded, applied patches has changed '
                            'from %d to %d\n') %
                          (len(old_guarded), len(guarded)))
    elif opts['series']:
        # -s/--series: summarize guards used in the series file with counts
        guards = {}
        noguards = 0
        for gs in q.series_guards:
            if not gs:
                noguards += 1
            for g in gs:
                guards.setdefault(g, 0)
                guards[g] += 1
        if ui.verbose:
            guards['NONE'] = noguards
        guards = guards.items()
        # sort by guard name, ignoring the leading +/- sign
        guards.sort(key=lambda x: x[0][1:])
        if guards:
            ui.note(_('guards in series file:\n'))
            for guard, count in guards:
                ui.note('%2d ' % count)
                ui.write(guard, '\n')
        else:
            ui.note(_('no guards in series file\n'))
    else:
        # no arguments: print the currently active guards
        if guards:
            ui.note(_('active guards:\n'))
            for g in guards:
                ui.write(g, '\n')
        else:
            ui.write(_('no active guards\n'))
    # remember the current top patch so --reapply can push back to it
    reapply = opts['reapply'] and q.applied and q.appliedname(-1)
    popped = False
    if opts['pop'] or opts['reapply']:
        # pop back to the last applied patch still pushable under the
        # new guard selection
        for i in xrange(len(q.applied)):
            pushable, reason = q.pushable(i)
            if not pushable:
                ui.status(_('popping guarded patches\n'))
                popped = True
                if i == 0:
                    q.pop(repo, all=True)
                else:
                    q.pop(repo, i-1)
                break
    if popped:
        try:
            if reapply:
                ui.status(_('reapplying unguarded patches\n'))
                q.push(repo, reapply)
        finally:
            q.save_dirty()
2362 2362
def finish(ui, repo, *revrange, **opts):
    """move applied patches into repository history

    Finishes the specified revisions (corresponding to applied patches) by
    moving them out of mq control into regular repository history.

    Accepts a revision range or the -a/--applied option. If --applied is
    specified, all applied mq revisions are removed from mq control.
    Otherwise, the given revisions must be at the base of the stack of applied
    patches.

    This can be especially useful if your changes have been applied to an
    upstream repository, or if you are about to push your changes to upstream.
    """
    if not opts['applied'] and not revrange:
        raise util.Abort(_('no revisions specified'))
    if opts['applied']:
        # --applied: finish everything from qbase through qtip
        revrange = ('qbase:qtip',) + revrange

    q = repo.mq
    if not q.applied:
        ui.status(_('no patches applied\n'))
        return 0

    revs = cmdutil.revrange(repo, revrange)
    q.finish(repo, revs)
    q.save_dirty()
    return 0
2391 2391
2392 2392 def reposetup(ui, repo):
2393 2393 class mqrepo(repo.__class__):
2394 2394 @util.propertycache
2395 2395 def mq(self):
2396 2396 return queue(self.ui, self.join(""))
2397 2397
2398 2398 def abort_if_wdir_patched(self, errmsg, force=False):
2399 2399 if self.mq.applied and not force:
2400 2400 parent = hex(self.dirstate.parents()[0])
2401 2401 if parent in [s.rev for s in self.mq.applied]:
2402 2402 raise util.Abort(errmsg)
2403 2403
2404 2404 def commit(self, text="", user=None, date=None, match=None,
2405 2405 force=False, editor=False, extra={}):
2406 2406 self.abort_if_wdir_patched(
2407 2407 _('cannot commit over an applied mq patch'),
2408 2408 force)
2409 2409
2410 2410 return super(mqrepo, self).commit(text, user, date, match, force,
2411 2411 editor, extra)
2412 2412
2413 2413 def push(self, remote, force=False, revs=None):
2414 2414 if self.mq.applied and not force and not revs:
2415 2415 raise util.Abort(_('source has mq patches applied'))
2416 2416 return super(mqrepo, self).push(remote, force, revs)
2417 2417
2418 def tags(self):
2419 if self.tagscache:
2420 return self.tagscache
2421
2422 tagscache = super(mqrepo, self).tags()
2418 def _findtags(self):
2419 '''augment tags from base class with patch tags'''
2420 result = super(mqrepo, self)._findtags()
2423 2421
2424 2422 q = self.mq
2425 2423 if not q.applied:
2426 return tagscache
2424 return result
2427 2425
2428 2426 mqtags = [(bin(patch.rev), patch.name) for patch in q.applied]
2429 2427
2430 2428 if mqtags[-1][0] not in self.changelog.nodemap:
2431 2429 self.ui.warn(_('mq status file refers to unknown node %s\n')
2432 2430 % short(mqtags[-1][0]))
2433 return tagscache
2431 return result
2434 2432
2435 2433 mqtags.append((mqtags[-1][0], 'qtip'))
2436 2434 mqtags.append((mqtags[0][0], 'qbase'))
2437 2435 mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent'))
2436 tags = result[0]
2438 2437 for patch in mqtags:
2439 if patch[1] in tagscache:
2438 if patch[1] in tags:
2440 2439 self.ui.warn(_('Tag %s overrides mq patch of the same name\n')
2441 2440 % patch[1])
2442 2441 else:
2443 tagscache[patch[1]] = patch[0]
2442 tags[patch[1]] = patch[0]
2444 2443
2445 return tagscache
2444 return result
2446 2445
2447 2446 def _branchtags(self, partial, lrev):
2448 2447 q = self.mq
2449 2448 if not q.applied:
2450 2449 return super(mqrepo, self)._branchtags(partial, lrev)
2451 2450
2452 2451 cl = self.changelog
2453 2452 qbasenode = bin(q.applied[0].rev)
2454 2453 if qbasenode not in cl.nodemap:
2455 2454 self.ui.warn(_('mq status file refers to unknown node %s\n')
2456 2455 % short(qbasenode))
2457 2456 return super(mqrepo, self)._branchtags(partial, lrev)
2458 2457
2459 2458 qbase = cl.rev(qbasenode)
2460 2459 start = lrev + 1
2461 2460 if start < qbase:
2462 2461 # update the cache (excluding the patches) and save it
2463 2462 self._updatebranchcache(partial, lrev+1, qbase)
2464 2463 self._writebranchcache(partial, cl.node(qbase-1), qbase-1)
2465 2464 start = qbase
2466 2465 # if start = qbase, the cache is as updated as it should be.
2467 2466 # if start > qbase, the cache includes (part of) the patches.
2468 2467 # we might as well use it, but we won't save it.
2469 2468
2470 2469 # update the cache up to the tip
2471 2470 self._updatebranchcache(partial, start, len(cl))
2472 2471
2473 2472 return partial
2474 2473
2475 2474 if repo.local():
2476 2475 repo.__class__ = mqrepo
2477 2476
2478 2477 def mqimport(orig, ui, repo, *args, **kwargs):
2479 2478 if hasattr(repo, 'abort_if_wdir_patched'):
2480 2479 repo.abort_if_wdir_patched(_('cannot import over an applied patch'),
2481 2480 kwargs.get('force'))
2482 2481 return orig(ui, repo, *args, **kwargs)
2483 2482
2484 2483 def uisetup(ui):
2485 2484 extensions.wrapcommand(commands.table, 'import', mqimport)
2486 2485
2487 2486 seriesopts = [('s', 'summary', None, _('print first line of patch header'))]
2488 2487
2489 2488 cmdtable = {
2490 2489 "qapplied": (applied, [] + seriesopts, _('hg qapplied [-s] [PATCH]')),
2491 2490 "qclone":
2492 2491 (clone,
2493 2492 [('', 'pull', None, _('use pull protocol to copy metadata')),
2494 2493 ('U', 'noupdate', None, _('do not update the new working directories')),
2495 2494 ('', 'uncompressed', None,
2496 2495 _('use uncompressed transfer (fast over LAN)')),
2497 2496 ('p', 'patches', '', _('location of source patch repository')),
2498 2497 ] + commands.remoteopts,
2499 2498 _('hg qclone [OPTION]... SOURCE [DEST]')),
2500 2499 "qcommit|qci":
2501 2500 (commit,
2502 2501 commands.table["^commit|ci"][1],
2503 2502 _('hg qcommit [OPTION]... [FILE]...')),
2504 2503 "^qdiff":
2505 2504 (diff,
2506 2505 commands.diffopts + commands.diffopts2 + commands.walkopts,
2507 2506 _('hg qdiff [OPTION]... [FILE]...')),
2508 2507 "qdelete|qremove|qrm":
2509 2508 (delete,
2510 2509 [('k', 'keep', None, _('keep patch file')),
2511 2510 ('r', 'rev', [], _('stop managing a revision (DEPRECATED)'))],
2512 2511 _('hg qdelete [-k] [-r REV]... [PATCH]...')),
2513 2512 'qfold':
2514 2513 (fold,
2515 2514 [('e', 'edit', None, _('edit patch header')),
2516 2515 ('k', 'keep', None, _('keep folded patch files')),
2517 2516 ] + commands.commitopts,
2518 2517 _('hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...')),
2519 2518 'qgoto':
2520 2519 (goto,
2521 2520 [('f', 'force', None, _('overwrite any local changes'))],
2522 2521 _('hg qgoto [OPTION]... PATCH')),
2523 2522 'qguard':
2524 2523 (guard,
2525 2524 [('l', 'list', None, _('list all patches and guards')),
2526 2525 ('n', 'none', None, _('drop all guards'))],
2527 2526 _('hg qguard [-l] [-n] -- [PATCH] [+GUARD]... [-GUARD]...')),
2528 2527 'qheader': (header, [], _('hg qheader [PATCH]')),
2529 2528 "^qimport":
2530 2529 (qimport,
2531 2530 [('e', 'existing', None, _('import file in patch directory')),
2532 2531 ('n', 'name', '', _('name of patch file')),
2533 2532 ('f', 'force', None, _('overwrite existing files')),
2534 2533 ('r', 'rev', [], _('place existing revisions under mq control')),
2535 2534 ('g', 'git', None, _('use git extended diff format')),
2536 2535 ('P', 'push', None, _('qpush after importing'))],
2537 2536 _('hg qimport [-e] [-n NAME] [-f] [-g] [-P] [-r REV]... FILE...')),
2538 2537 "^qinit":
2539 2538 (init,
2540 2539 [('c', 'create-repo', None, _('create queue repository'))],
2541 2540 _('hg qinit [-c]')),
2542 2541 "qnew":
2543 2542 (new,
2544 2543 [('e', 'edit', None, _('edit commit message')),
2545 2544 ('f', 'force', None, _('import uncommitted changes into patch')),
2546 2545 ('g', 'git', None, _('use git extended diff format')),
2547 2546 ('U', 'currentuser', None, _('add "From: <current user>" to patch')),
2548 2547 ('u', 'user', '', _('add "From: <given user>" to patch')),
2549 2548 ('D', 'currentdate', None, _('add "Date: <current date>" to patch')),
2550 2549 ('d', 'date', '', _('add "Date: <given date>" to patch'))
2551 2550 ] + commands.walkopts + commands.commitopts,
2552 2551 _('hg qnew [-e] [-m TEXT] [-l FILE] [-f] PATCH [FILE]...')),
2553 2552 "qnext": (next, [] + seriesopts, _('hg qnext [-s]')),
2554 2553 "qprev": (prev, [] + seriesopts, _('hg qprev [-s]')),
2555 2554 "^qpop":
2556 2555 (pop,
2557 2556 [('a', 'all', None, _('pop all patches')),
2558 2557 ('n', 'name', '', _('queue name to pop')),
2559 2558 ('f', 'force', None, _('forget any local changes'))],
2560 2559 _('hg qpop [-a] [-n NAME] [-f] [PATCH | INDEX]')),
2561 2560 "^qpush":
2562 2561 (push,
2563 2562 [('f', 'force', None, _('apply if the patch has rejects')),
2564 2563 ('l', 'list', None, _('list patch name in commit text')),
2565 2564 ('a', 'all', None, _('apply all patches')),
2566 2565 ('m', 'merge', None, _('merge from another queue')),
2567 2566 ('n', 'name', '', _('merge queue name'))],
2568 2567 _('hg qpush [-f] [-l] [-a] [-m] [-n NAME] [PATCH | INDEX]')),
2569 2568 "^qrefresh":
2570 2569 (refresh,
2571 2570 [('e', 'edit', None, _('edit commit message')),
2572 2571 ('g', 'git', None, _('use git extended diff format')),
2573 2572 ('s', 'short', None, _('refresh only files already in the patch and specified files')),
2574 2573 ('U', 'currentuser', None, _('add/update "From: <current user>" in patch')),
2575 2574 ('u', 'user', '', _('add/update "From: <given user>" in patch')),
2576 2575 ('D', 'currentdate', None, _('update "Date: <current date>" in patch (if present)')),
2577 2576 ('d', 'date', '', _('update "Date: <given date>" in patch (if present)'))
2578 2577 ] + commands.walkopts + commands.commitopts,
2579 2578 _('hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...')),
2580 2579 'qrename|qmv':
2581 2580 (rename, [], _('hg qrename PATCH1 [PATCH2]')),
2582 2581 "qrestore":
2583 2582 (restore,
2584 2583 [('d', 'delete', None, _('delete save entry')),
2585 2584 ('u', 'update', None, _('update queue working directory'))],
2586 2585 _('hg qrestore [-d] [-u] REV')),
2587 2586 "qsave":
2588 2587 (save,
2589 2588 [('c', 'copy', None, _('copy patch directory')),
2590 2589 ('n', 'name', '', _('copy directory name')),
2591 2590 ('e', 'empty', None, _('clear queue status file')),
2592 2591 ('f', 'force', None, _('force copy'))] + commands.commitopts,
2593 2592 _('hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]')),
2594 2593 "qselect":
2595 2594 (select,
2596 2595 [('n', 'none', None, _('disable all guards')),
2597 2596 ('s', 'series', None, _('list all guards in series file')),
2598 2597 ('', 'pop', None, _('pop to before first guarded applied patch')),
2599 2598 ('', 'reapply', None, _('pop, then reapply patches'))],
2600 2599 _('hg qselect [OPTION]... [GUARD]...')),
2601 2600 "qseries":
2602 2601 (series,
2603 2602 [('m', 'missing', None, _('print patches not in series')),
2604 2603 ] + seriesopts,
2605 2604 _('hg qseries [-ms]')),
2606 2605 "^strip":
2607 2606 (strip,
2608 2607 [('f', 'force', None, _('force removal with local changes')),
2609 2608 ('b', 'backup', None, _('bundle unrelated changesets')),
2610 2609 ('n', 'nobackup', None, _('no backups'))],
2611 2610 _('hg strip [-f] [-b] [-n] REV')),
2612 2611 "qtop": (top, [] + seriesopts, _('hg qtop [-s]')),
2613 2612 "qunapplied": (unapplied, [] + seriesopts, _('hg qunapplied [-s] [PATCH]')),
2614 2613 "qfinish":
2615 2614 (finish,
2616 2615 [('a', 'applied', None, _('finish all applied changesets'))],
2617 2616 _('hg qfinish [-a] [REV]...')),
2618 2617 }
@@ -1,2180 +1,2194 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2, incorporated herein by reference.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup, subrepo
11 11 import changelog, dirstate, filelog, manifest, context
12 12 import lock, transaction, store, encoding
13 13 import util, extensions, hook, error
14 14 import match as match_
15 15 import merge as merge_
16 16 from lock import release
17 17 import weakref, stat, errno, os, time, inspect
18 18 propertycache = util.propertycache
19 19
20 20 class localrepository(repo.repository):
21 21 capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
22 22 supported = set('revlogv1 store fncache shared'.split())
23 23
24 24 def __init__(self, baseui, path=None, create=0):
25 25 repo.repository.__init__(self)
26 26 self.root = os.path.realpath(path)
27 27 self.path = os.path.join(self.root, ".hg")
28 28 self.origroot = path
29 29 self.opener = util.opener(self.path)
30 30 self.wopener = util.opener(self.root)
31 31 self.baseui = baseui
32 32 self.ui = baseui.copy()
33 33
34 34 try:
35 35 self.ui.readconfig(self.join("hgrc"), self.root)
36 36 extensions.loadall(self.ui)
37 37 except IOError:
38 38 pass
39 39
40 40 if not os.path.isdir(self.path):
41 41 if create:
42 42 if not os.path.exists(path):
43 43 os.mkdir(path)
44 44 os.mkdir(self.path)
45 45 requirements = ["revlogv1"]
46 46 if self.ui.configbool('format', 'usestore', True):
47 47 os.mkdir(os.path.join(self.path, "store"))
48 48 requirements.append("store")
49 49 if self.ui.configbool('format', 'usefncache', True):
50 50 requirements.append("fncache")
51 51 # create an invalid changelog
52 52 self.opener("00changelog.i", "a").write(
53 53 '\0\0\0\2' # represents revlogv2
54 54 ' dummy changelog to prevent using the old repo layout'
55 55 )
56 56 reqfile = self.opener("requires", "w")
57 57 for r in requirements:
58 58 reqfile.write("%s\n" % r)
59 59 reqfile.close()
60 60 else:
61 61 raise error.RepoError(_("repository %s not found") % path)
62 62 elif create:
63 63 raise error.RepoError(_("repository %s already exists") % path)
64 64 else:
65 65 # find requirements
66 66 requirements = set()
67 67 try:
68 68 requirements = set(self.opener("requires").read().splitlines())
69 69 except IOError, inst:
70 70 if inst.errno != errno.ENOENT:
71 71 raise
72 72 for r in requirements - self.supported:
73 73 raise error.RepoError(_("requirement '%s' not supported") % r)
74 74
75 75 self.sharedpath = self.path
76 76 try:
77 77 s = os.path.realpath(self.opener("sharedpath").read())
78 78 if not os.path.exists(s):
79 79 raise error.RepoError(
80 80 _('.hg/sharedpath points to nonexistent directory %s') % s)
81 81 self.sharedpath = s
82 82 except IOError, inst:
83 83 if inst.errno != errno.ENOENT:
84 84 raise
85 85
86 86 self.store = store.store(requirements, self.sharedpath, util.opener)
87 87 self.spath = self.store.path
88 88 self.sopener = self.store.opener
89 89 self.sjoin = self.store.join
90 90 self.opener.createmode = self.store.createmode
91 91
92 92 self.tagscache = None
93 93 self._tagstypecache = None
94 94 self.branchcache = None
95 95 self._ubranchcache = None # UTF-8 version of branchcache
96 96 self._branchcachetip = None
97 97 self.nodetagscache = None
98 98 self.filterpats = {}
99 99 self._datafilters = {}
100 100 self._transref = self._lockref = self._wlockref = None
101 101
102 102 @propertycache
103 103 def changelog(self):
104 104 c = changelog.changelog(self.sopener)
105 105 if 'HG_PENDING' in os.environ:
106 106 p = os.environ['HG_PENDING']
107 107 if p.startswith(self.root):
108 108 c.readpending('00changelog.i.a')
109 109 self.sopener.defversion = c.version
110 110 return c
111 111
112 112 @propertycache
113 113 def manifest(self):
114 114 return manifest.manifest(self.sopener)
115 115
116 116 @propertycache
117 117 def dirstate(self):
118 118 return dirstate.dirstate(self.opener, self.ui, self.root)
119 119
120 120 def __getitem__(self, changeid):
121 121 if changeid is None:
122 122 return context.workingctx(self)
123 123 return context.changectx(self, changeid)
124 124
125 125 def __nonzero__(self):
126 126 return True
127 127
128 128 def __len__(self):
129 129 return len(self.changelog)
130 130
131 131 def __iter__(self):
132 132 for i in xrange(len(self)):
133 133 yield i
134 134
135 135 def url(self):
136 136 return 'file:' + self.root
137 137
138 138 def hook(self, name, throw=False, **args):
139 139 return hook.hook(self.ui, self, name, throw, **args)
140 140
141 141 tag_disallowed = ':\r\n'
142 142
143 143 def _tag(self, names, node, message, local, user, date, extra={}):
144 144 if isinstance(names, str):
145 145 allchars = names
146 146 names = (names,)
147 147 else:
148 148 allchars = ''.join(names)
149 149 for c in self.tag_disallowed:
150 150 if c in allchars:
151 151 raise util.Abort(_('%r cannot be used in a tag name') % c)
152 152
153 153 for name in names:
154 154 self.hook('pretag', throw=True, node=hex(node), tag=name,
155 155 local=local)
156 156
157 157 def writetags(fp, names, munge, prevtags):
158 158 fp.seek(0, 2)
159 159 if prevtags and prevtags[-1] != '\n':
160 160 fp.write('\n')
161 161 for name in names:
162 162 m = munge and munge(name) or name
163 163 if self._tagstypecache and name in self._tagstypecache:
164 164 old = self.tagscache.get(name, nullid)
165 165 fp.write('%s %s\n' % (hex(old), m))
166 166 fp.write('%s %s\n' % (hex(node), m))
167 167 fp.close()
168 168
169 169 prevtags = ''
170 170 if local:
171 171 try:
172 172 fp = self.opener('localtags', 'r+')
173 173 except IOError:
174 174 fp = self.opener('localtags', 'a')
175 175 else:
176 176 prevtags = fp.read()
177 177
178 178 # local tags are stored in the current charset
179 179 writetags(fp, names, None, prevtags)
180 180 for name in names:
181 181 self.hook('tag', node=hex(node), tag=name, local=local)
182 182 return
183 183
184 184 try:
185 185 fp = self.wfile('.hgtags', 'rb+')
186 186 except IOError:
187 187 fp = self.wfile('.hgtags', 'ab')
188 188 else:
189 189 prevtags = fp.read()
190 190
191 191 # committed tags are stored in UTF-8
192 192 writetags(fp, names, encoding.fromlocal, prevtags)
193 193
194 194 if '.hgtags' not in self.dirstate:
195 195 self.add(['.hgtags'])
196 196
197 197 m = match_.exact(self.root, '', ['.hgtags'])
198 198 tagnode = self.commit(message, user, date, extra=extra, match=m)
199 199
200 200 for name in names:
201 201 self.hook('tag', node=hex(node), tag=name, local=local)
202 202
203 203 return tagnode
204 204
205 205 def tag(self, names, node, message, local, user, date):
206 206 '''tag a revision with one or more symbolic names.
207 207
208 208 names is a list of strings or, when adding a single tag, names may be a
209 209 string.
210 210
211 211 if local is True, the tags are stored in a per-repository file.
212 212 otherwise, they are stored in the .hgtags file, and a new
213 213 changeset is committed with the change.
214 214
215 215 keyword arguments:
216 216
217 217 local: whether to store tags in non-version-controlled file
218 218 (default False)
219 219
220 220 message: commit message to use if committing
221 221
222 222 user: name of user to use if committing
223 223
224 224 date: date tuple to use if committing'''
225 225
226 226 for x in self.status()[:5]:
227 227 if '.hgtags' in x:
228 228 raise util.Abort(_('working copy of .hgtags is changed '
229 229 '(please commit .hgtags manually)'))
230 230
231 231 self.tags() # instantiate the cache
232 232 self._tag(names, node, message, local, user, date)
233 233
234 234 def tags(self):
235 235 '''return a mapping of tag to node'''
236 if self.tagscache:
236 if self.tagscache is None:
237 (self.tagscache, self._tagstypecache) = self._findtags()
238
237 239 return self.tagscache
238 240
241 def _findtags(self):
242 '''Do the hard work of finding tags. Return a pair of dicts
243 (tags, tagtypes) where tags maps tag name to node, and tagtypes
244 maps tag name to a string like \'global\' or \'local\'.
245 Subclasses or extensions are free to add their own tags, but
246 should be aware that the returned dicts will be retained for the
247 duration of the localrepo object.'''
248
249 # XXX what tagtype should subclasses/extensions use? Currently
250 # mq and bookmarks add tags, but do not set the tagtype at all.
251 # Should each extension invent its own tag type? Should there
252 # be one tagtype for all such "virtual" tags? Or is the status
253 # quo fine?
254
239 255 globaltags = {}
240 256 tagtypes = {}
241 257
242 258 def readtags(lines, fn, tagtype):
243 259 filetags = {}
244 260 count = 0
245 261
246 262 def warn(msg):
247 263 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
248 264
249 265 for l in lines:
250 266 count += 1
251 267 if not l:
252 268 continue
253 269 s = l.split(" ", 1)
254 270 if len(s) != 2:
255 271 warn(_("cannot parse entry"))
256 272 continue
257 273 node, key = s
258 274 key = encoding.tolocal(key.strip()) # stored in UTF-8
259 275 try:
260 276 bin_n = bin(node)
261 277 except TypeError:
262 278 warn(_("node '%s' is not well formed") % node)
263 279 continue
264 280 if bin_n not in self.changelog.nodemap:
265 281 # silently ignore as pull -r might cause this
266 282 continue
267 283
268 284 h = []
269 285 if key in filetags:
270 286 n, h = filetags[key]
271 287 h.append(n)
272 288 filetags[key] = (bin_n, h)
273 289
274 290 for k, nh in filetags.iteritems():
275 291 if k not in globaltags:
276 292 globaltags[k] = nh
277 293 tagtypes[k] = tagtype
278 294 continue
279 295
280 296 # we prefer the global tag if:
281 297 # it supercedes us OR
282 298 # mutual supercedes and it has a higher rank
283 299 # otherwise we win because we're tip-most
284 300 an, ah = nh
285 301 bn, bh = globaltags[k]
286 302 if (bn != an and an in bh and
287 303 (bn not in ah or len(bh) > len(ah))):
288 304 an = bn
289 305 ah.extend([n for n in bh if n not in ah])
290 306 globaltags[k] = an, ah
291 307 tagtypes[k] = tagtype
292 308
293 309 seen = set()
294 310 f = None
295 311 ctxs = []
296 312 for node in self.heads():
297 313 try:
298 314 fnode = self[node].filenode('.hgtags')
299 315 except error.LookupError:
300 316 continue
301 317 if fnode not in seen:
302 318 seen.add(fnode)
303 319 if not f:
304 320 f = self.filectx('.hgtags', fileid=fnode)
305 321 else:
306 322 f = f.filectx(fnode)
307 323 ctxs.append(f)
308 324
309 325 # read the tags file from each head, ending with the tip
310 326 for f in reversed(ctxs):
311 327 readtags(f.data().splitlines(), f, "global")
312 328
313 329 try:
314 330 data = encoding.fromlocal(self.opener("localtags").read())
315 331 # localtags are stored in the local character set
316 332 # while the internal tag table is stored in UTF-8
317 333 readtags(data.splitlines(), "localtags", "local")
318 334 except IOError:
319 335 pass
320 336
321 self.tagscache = {}
322 self._tagstypecache = {}
337 tags = {}
323 338 for k, nh in globaltags.iteritems():
324 339 n = nh[0]
325 340 if n != nullid:
326 self.tagscache[k] = n
327 self._tagstypecache[k] = tagtypes[k]
328 self.tagscache['tip'] = self.changelog.tip()
329 return self.tagscache
341 tags[k] = n
342 tags['tip'] = self.changelog.tip()
343 return (tags, tagtypes)
330 344
331 345 def tagtype(self, tagname):
332 346 '''
333 347 return the type of the given tag. result can be:
334 348
335 349 'local' : a local tag
336 350 'global' : a global tag
337 351 None : tag does not exist
338 352 '''
339 353
340 354 self.tags()
341 355
342 356 return self._tagstypecache.get(tagname)
343 357
344 358 def tagslist(self):
345 359 '''return a list of tags ordered by revision'''
346 360 l = []
347 361 for t, n in self.tags().iteritems():
348 362 try:
349 363 r = self.changelog.rev(n)
350 364 except:
351 365 r = -2 # sort to the beginning of the list if unknown
352 366 l.append((r, t, n))
353 367 return [(t, n) for r, t, n in sorted(l)]
354 368
355 369 def nodetags(self, node):
356 370 '''return the tags associated with a node'''
357 371 if not self.nodetagscache:
358 372 self.nodetagscache = {}
359 373 for t, n in self.tags().iteritems():
360 374 self.nodetagscache.setdefault(n, []).append(t)
361 375 return self.nodetagscache.get(node, [])
362 376
363 377 def _branchtags(self, partial, lrev):
364 378 # TODO: rename this function?
365 379 tiprev = len(self) - 1
366 380 if lrev != tiprev:
367 381 self._updatebranchcache(partial, lrev+1, tiprev+1)
368 382 self._writebranchcache(partial, self.changelog.tip(), tiprev)
369 383
370 384 return partial
371 385
372 386 def branchmap(self):
373 387 tip = self.changelog.tip()
374 388 if self.branchcache is not None and self._branchcachetip == tip:
375 389 return self.branchcache
376 390
377 391 oldtip = self._branchcachetip
378 392 self._branchcachetip = tip
379 393 if self.branchcache is None:
380 394 self.branchcache = {} # avoid recursion in changectx
381 395 else:
382 396 self.branchcache.clear() # keep using the same dict
383 397 if oldtip is None or oldtip not in self.changelog.nodemap:
384 398 partial, last, lrev = self._readbranchcache()
385 399 else:
386 400 lrev = self.changelog.rev(oldtip)
387 401 partial = self._ubranchcache
388 402
389 403 self._branchtags(partial, lrev)
390 404 # this private cache holds all heads (not just tips)
391 405 self._ubranchcache = partial
392 406
393 407 # the branch cache is stored on disk as UTF-8, but in the local
394 408 # charset internally
395 409 for k, v in partial.iteritems():
396 410 self.branchcache[encoding.tolocal(k)] = v
397 411 return self.branchcache
398 412
399 413
400 414 def branchtags(self):
401 415 '''return a dict where branch names map to the tipmost head of
402 416 the branch, open heads come before closed'''
403 417 bt = {}
404 418 for bn, heads in self.branchmap().iteritems():
405 419 head = None
406 420 for i in range(len(heads)-1, -1, -1):
407 421 h = heads[i]
408 422 if 'close' not in self.changelog.read(h)[5]:
409 423 head = h
410 424 break
411 425 # no open heads were found
412 426 if head is None:
413 427 head = heads[-1]
414 428 bt[bn] = head
415 429 return bt
416 430
417 431
418 432 def _readbranchcache(self):
419 433 partial = {}
420 434 try:
421 435 f = self.opener("branchheads.cache")
422 436 lines = f.read().split('\n')
423 437 f.close()
424 438 except (IOError, OSError):
425 439 return {}, nullid, nullrev
426 440
427 441 try:
428 442 last, lrev = lines.pop(0).split(" ", 1)
429 443 last, lrev = bin(last), int(lrev)
430 444 if lrev >= len(self) or self[lrev].node() != last:
431 445 # invalidate the cache
432 446 raise ValueError('invalidating branch cache (tip differs)')
433 447 for l in lines:
434 448 if not l: continue
435 449 node, label = l.split(" ", 1)
436 450 partial.setdefault(label.strip(), []).append(bin(node))
437 451 except KeyboardInterrupt:
438 452 raise
439 453 except Exception, inst:
440 454 if self.ui.debugflag:
441 455 self.ui.warn(str(inst), '\n')
442 456 partial, last, lrev = {}, nullid, nullrev
443 457 return partial, last, lrev
444 458
445 459 def _writebranchcache(self, branches, tip, tiprev):
446 460 try:
447 461 f = self.opener("branchheads.cache", "w", atomictemp=True)
448 462 f.write("%s %s\n" % (hex(tip), tiprev))
449 463 for label, nodes in branches.iteritems():
450 464 for node in nodes:
451 465 f.write("%s %s\n" % (hex(node), label))
452 466 f.rename()
453 467 except (IOError, OSError):
454 468 pass
455 469
456 470 def _updatebranchcache(self, partial, start, end):
457 471 # collect new branch entries
458 472 newbranches = {}
459 473 for r in xrange(start, end):
460 474 c = self[r]
461 475 newbranches.setdefault(c.branch(), []).append(c.node())
462 476 # if older branchheads are reachable from new ones, they aren't
463 477 # really branchheads. Note checking parents is insufficient:
464 478 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
465 479 for branch, newnodes in newbranches.iteritems():
466 480 bheads = partial.setdefault(branch, [])
467 481 bheads.extend(newnodes)
468 482 if len(bheads) < 2:
469 483 continue
470 484 newbheads = []
471 485 # starting from tip means fewer passes over reachable
472 486 while newnodes:
473 487 latest = newnodes.pop()
474 488 if latest not in bheads:
475 489 continue
476 490 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
477 491 reachable = self.changelog.reachable(latest, minbhrev)
478 492 bheads = [b for b in bheads if b not in reachable]
479 493 newbheads.insert(0, latest)
480 494 bheads.extend(newbheads)
481 495 partial[branch] = bheads
482 496
483 497 def lookup(self, key):
484 498 if isinstance(key, int):
485 499 return self.changelog.node(key)
486 500 elif key == '.':
487 501 return self.dirstate.parents()[0]
488 502 elif key == 'null':
489 503 return nullid
490 504 elif key == 'tip':
491 505 return self.changelog.tip()
492 506 n = self.changelog._match(key)
493 507 if n:
494 508 return n
495 509 if key in self.tags():
496 510 return self.tags()[key]
497 511 if key in self.branchtags():
498 512 return self.branchtags()[key]
499 513 n = self.changelog._partialmatch(key)
500 514 if n:
501 515 return n
502 516
503 517 # can't find key, check if it might have come from damaged dirstate
504 518 if key in self.dirstate.parents():
505 519 raise error.Abort(_("working directory has unknown parent '%s'!")
506 520 % short(key))
507 521 try:
508 522 if len(key) == 20:
509 523 key = hex(key)
510 524 except:
511 525 pass
512 526 raise error.RepoError(_("unknown revision '%s'") % key)
513 527
514 528 def local(self):
515 529 return True
516 530
517 531 def join(self, f):
518 532 return os.path.join(self.path, f)
519 533
520 534 def wjoin(self, f):
521 535 return os.path.join(self.root, f)
522 536
523 537 def rjoin(self, f):
524 538 return os.path.join(self.root, util.pconvert(f))
525 539
526 540 def file(self, f):
527 541 if f[0] == '/':
528 542 f = f[1:]
529 543 return filelog.filelog(self.sopener, f)
530 544
531 545 def changectx(self, changeid):
532 546 return self[changeid]
533 547
534 548 def parents(self, changeid=None):
535 549 '''get list of changectxs for parents of changeid'''
536 550 return self[changeid].parents()
537 551
538 552 def filectx(self, path, changeid=None, fileid=None):
539 553 """changeid can be a changeset revision, node, or tag.
540 554 fileid can be a file revision or node."""
541 555 return context.filectx(self, path, changeid, fileid)
542 556
543 557 def getcwd(self):
544 558 return self.dirstate.getcwd()
545 559
546 560 def pathto(self, f, cwd=None):
547 561 return self.dirstate.pathto(f, cwd)
548 562
549 563 def wfile(self, f, mode='r'):
550 564 return self.wopener(f, mode)
551 565
552 566 def _link(self, f):
553 567 return os.path.islink(self.wjoin(f))
554 568
555 569 def _filter(self, filter, filename, data):
556 570 if filter not in self.filterpats:
557 571 l = []
558 572 for pat, cmd in self.ui.configitems(filter):
559 573 if cmd == '!':
560 574 continue
561 575 mf = match_.match(self.root, '', [pat])
562 576 fn = None
563 577 params = cmd
564 578 for name, filterfn in self._datafilters.iteritems():
565 579 if cmd.startswith(name):
566 580 fn = filterfn
567 581 params = cmd[len(name):].lstrip()
568 582 break
569 583 if not fn:
570 584 fn = lambda s, c, **kwargs: util.filter(s, c)
571 585 # Wrap old filters not supporting keyword arguments
572 586 if not inspect.getargspec(fn)[2]:
573 587 oldfn = fn
574 588 fn = lambda s, c, **kwargs: oldfn(s, c)
575 589 l.append((mf, fn, params))
576 590 self.filterpats[filter] = l
577 591
578 592 for mf, fn, cmd in self.filterpats[filter]:
579 593 if mf(filename):
580 594 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
581 595 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
582 596 break
583 597
584 598 return data
585 599
586 600 def adddatafilter(self, name, filter):
587 601 self._datafilters[name] = filter
588 602
589 603 def wread(self, filename):
590 604 if self._link(filename):
591 605 data = os.readlink(self.wjoin(filename))
592 606 else:
593 607 data = self.wopener(filename, 'r').read()
594 608 return self._filter("encode", filename, data)
595 609
596 610 def wwrite(self, filename, data, flags):
597 611 data = self._filter("decode", filename, data)
598 612 try:
599 613 os.unlink(self.wjoin(filename))
600 614 except OSError:
601 615 pass
602 616 if 'l' in flags:
603 617 self.wopener.symlink(data, filename)
604 618 else:
605 619 self.wopener(filename, 'w').write(data)
606 620 if 'x' in flags:
607 621 util.set_flags(self.wjoin(filename), False, True)
608 622
609 623 def wwritedata(self, filename, data):
610 624 return self._filter("decode", filename, data)
611 625
    def transaction(self):
        """Open a new transaction, or nest into the currently running one.

        Returns a transaction object; callers must close() it on success.
        Before starting, the current dirstate and branch are copied to
        journal.* files so a later rollback() can restore them.  Raises
        error.RepoError if a journal already exists (an earlier
        transaction was interrupted and needs 'hg recover').
        """
        # reuse the live transaction, if any, through the weak reference
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(_("journal already exists - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            # no dirstate yet (fresh repository)
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(self.dirstate.branch())

        # on successful close the journal.* files are renamed to undo.*
        # so rollback() can find them afterwards
        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        # hold only a weak reference so an abandoned transaction can be
        # collected (and thereby aborted) when callers drop it
        self._transref = weakref.ref(tr)
        return tr
638 652
639 653 def recover(self):
640 654 lock = self.lock()
641 655 try:
642 656 if os.path.exists(self.sjoin("journal")):
643 657 self.ui.status(_("rolling back interrupted transaction\n"))
644 658 transaction.rollback(self.sopener, self.sjoin("journal"), self.ui.warn)
645 659 self.invalidate()
646 660 return True
647 661 else:
648 662 self.ui.warn(_("no interrupted transaction available\n"))
649 663 return False
650 664 finally:
651 665 lock.release()
652 666
    def rollback(self):
        """Undo the last transaction using the undo.* files.

        Restores the store from 'undo', the dirstate from
        'undo.dirstate' and the named branch from 'undo.branch', then
        invalidates all in-memory caches.  Takes both the working
        directory lock and the store lock.
        """
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                self.ui.status(_("rolling back last transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("undo"), self.ui.warn)
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                try:
                    branch = self.opener("undo.branch").read()
                    self.dirstate.setbranch(branch)
                except IOError:
                    # undo.branch missing (older undo data): warn and keep
                    # whatever branch the dirstate currently reports
                    self.ui.warn(_("Named branch could not be reset, "
                                   "current branch still is: %s\n")
                                 % encoding.tolocal(self.dirstate.branch()))
                self.invalidate()
                self.dirstate.invalidate()
            else:
                self.ui.warn(_("no rollback information available\n"))
        finally:
            release(lock, wlock)
675 689
676 690 def invalidate(self):
677 691 for a in "changelog manifest".split():
678 692 if a in self.__dict__:
679 693 delattr(self, a)
680 694 self.tagscache = None
681 695 self._tagstypecache = None
682 696 self.nodetagscache = None
683 697 self.branchcache = None
684 698 self._ubranchcache = None
685 699 self._branchcachetip = None
686 700
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        """Acquire the lock file 'lockname' and return the lock object.

        If the lock is held elsewhere and 'wait' is false, the LockHeld
        exception propagates; otherwise a warning is printed and the
        acquisition is retried with a timeout (ui.timeout config,
        default 600 seconds).  'releasefn' and 'acquirefn' are optional
        callbacks invoked on release/acquisition; 'desc' is used in
        user-facing messages.
        """
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
701 715
702 716 def lock(self, wait=True):
703 717 l = self._lockref and self._lockref()
704 718 if l is not None and l.held:
705 719 l.lock()
706 720 return l
707 721
708 722 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
709 723 _('repository %s') % self.origroot)
710 724 self._lockref = weakref.ref(l)
711 725 return l
712 726
713 727 def wlock(self, wait=True):
714 728 l = self._wlockref and self._wlockref()
715 729 if l is not None and l.held:
716 730 l.lock()
717 731 return l
718 732
719 733 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
720 734 self.dirstate.invalidate, _('working directory of %s') %
721 735 self.origroot)
722 736 self._wlockref = weakref.ref(l)
723 737 return l
724 738
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        fctx: file context to commit
        manifest1, manifest2: manifests of the commit's two parents
        linkrev: changelog revision the new filelog entry links to
        tr: the active transaction (weak proxy)
        changelist: list the file name is appended to if the file changed

        Returns the filelog node to record in the manifest: a newly
        added one when the file changed, otherwise the first parent's
        node (reused, possibly with only a flags change recorded in
        changelist).
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3 rev1 changes file foo
            #   \       /  rev2 renames foo to bar and changes it
            #    \- 2 -/   rev3 should have bar with all changes and
            #              should record that bar descends from
            #              bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3 rev4 reverts the content change from rev2
            #   \       /  merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4 as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(_(" %s: searching for copy revision for %s\n") %
                              (fname, cfname))
                for ancestor in self['.'].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            self.ui.debug(_(" %s: copy %s:%s\n") % (fname, cfname, hex(crev)))
            meta["copy"] = cfname
            meta["copyrev"] = hex(crev)
            fparent1, fparent2 = nullid, newfparent
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                # p2 descends from p1: record only the newer parent
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
799 813
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.

        Returns the new changeset node, or None when there is nothing
        to commit.  Raises util.Abort on partial merge commits,
        unmatched explicit patterns or unresolved merge conflicts.
        NOTE: the mutable default extra={} is shared across calls;
        callers must not mutate it.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = match_.always(self.root, '')

        if not force:
            # record visited directories so explicit dir patterns can be
            # validated below; abort on bad (unmatchable) files
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            p1, p2 = self.dirstate.parents()
            wctx = self[None]

            # a merge (two parents) must be committed whole
            if (not force and p2 != nullid and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            for s in wctx.substate:
                if match(s) and wctx.sub(s).dirty():
                    subs.append(s)
            if subs and '.hgsubstate' not in changes[0]:
                changes[0].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            # nothing changed: bail out, unless we are closing a branch
            # head or recording a branch change
            if (not force and not extra.get("close") and p2 == nullid
                and not (changes[0] or changes[1] or changes[2])
                and self[None].branch() == self['.'].branch()):
                return None

            # refuse to commit files still marked unresolved by a merge
            ms = merge_.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg resolve)"))

            cctx = context.workingctx(self, (p1, p2), text, user, date,
                                      extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)

            # commit subs
            if subs:
                state = wctx.substate.copy()
                for s in subs:
                    self.ui.status(_('committing subrepository %s\n') % s)
                    sr = wctx.sub(s).commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            ret = self.commitctx(cctx, True)

            # update dirstate and mergestate
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.forget(f)
            self.dirstate.setparents(ret)
            ms.reset()

            return ret

        finally:
            wlock.release()
900 914
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.

        Revision information is passed via the context argument.
        If 'error' is false, files that cannot be read are silently
        dropped from the commit; if true, a warning is printed and the
        exception propagates.  Returns the new changelog node.
        """

        tr = lock = None
        removed = ctx.removed()
        p1, p2 = ctx.p1(), ctx.p2()
        m1 = p1.manifest().copy()
        m2 = p2.manifest()
        user = ctx.user()

        xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        lock = self.lock()
        try:
            tr = self.transaction()
            # weak proxy: the filelogs must not keep the transaction alive
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            changed = []
            linkrev = len(self)
            for f in sorted(ctx.modified() + ctx.added()):
                self.ui.note(f + "\n")
                try:
                    fctx = ctx[f]
                    new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                              changed)
                    m1.set(f, fctx.flags())
                except (OSError, IOError):
                    if error:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        removed.append(f)

            # update manifest
            m1.update(new)
            removed = [f for f in sorted(removed) if f in m1 or f in m2]
            drop = [f for f in removed if f in m1]
            for f in drop:
                del m1[f]
            mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                   p2.manifestnode(), (new, drop))

            # update changelog; the write is delayed so pretxncommit hooks
            # can inspect the pending data before it hits disk
            self.changelog.delayupdate()
            n = self.changelog.add(mn, changed + removed, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            # refresh the branch cache if one is already loaded
            if self.branchcache:
                self.branchtags()

            self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
            return n
        finally:
            del tr
            lock.release()
968 982
969 983 def walk(self, match, node=None):
970 984 '''
971 985 walk recursively through the directory tree or a given
972 986 changeset, finding all files matched by the match
973 987 function
974 988 '''
975 989 return self[node].walk(match)
976 990
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a 7-tuple of sorted file lists:
        (modified, added, removed, deleted, unknown, ignored, clean);
        the last three are populated only when the corresponding flag
        is set.
        """

        def mfmatches(ctx):
            # manifest of ctx restricted to the files selected by match
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or match_.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # warn about bad explicit files unless they exist in ctx1
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            s = self.dirstate.status(match, listignored, listclean, listunknown)
            # cmp holds files the dirstate could not decide on by
            # stat information alone; they need a content comparison
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f].data())):
                        modified.append(f)
                    else:
                        fixup.append(f)

                if listclean:
                    clean += fixup

                # update dirstate for files that are actually clean
                if fixup:
                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            # re-derive the lists by comparing the two manifests; a None
            # entry in mf2 (working dir) forces a content comparison
            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            # whatever remains in mf1 was not seen in mf2: removed
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean
        [l.sort() for l in r]
        return r
1082 1096
1083 1097 def add(self, list):
1084 1098 wlock = self.wlock()
1085 1099 try:
1086 1100 rejected = []
1087 1101 for f in list:
1088 1102 p = self.wjoin(f)
1089 1103 try:
1090 1104 st = os.lstat(p)
1091 1105 except:
1092 1106 self.ui.warn(_("%s does not exist!\n") % f)
1093 1107 rejected.append(f)
1094 1108 continue
1095 1109 if st.st_size > 10000000:
1096 1110 self.ui.warn(_("%s: files over 10MB may cause memory and"
1097 1111 " performance problems\n"
1098 1112 "(use 'hg revert %s' to unadd the file)\n")
1099 1113 % (f, f))
1100 1114 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1101 1115 self.ui.warn(_("%s not added: only files and symlinks "
1102 1116 "supported currently\n") % f)
1103 1117 rejected.append(p)
1104 1118 elif self.dirstate[f] in 'amn':
1105 1119 self.ui.warn(_("%s already tracked!\n") % f)
1106 1120 elif self.dirstate[f] == 'r':
1107 1121 self.dirstate.normallookup(f)
1108 1122 else:
1109 1123 self.dirstate.add(f)
1110 1124 return rejected
1111 1125 finally:
1112 1126 wlock.release()
1113 1127
1114 1128 def forget(self, list):
1115 1129 wlock = self.wlock()
1116 1130 try:
1117 1131 for f in list:
1118 1132 if self.dirstate[f] != 'a':
1119 1133 self.ui.warn(_("%s not added!\n") % f)
1120 1134 else:
1121 1135 self.dirstate.forget(f)
1122 1136 finally:
1123 1137 wlock.release()
1124 1138
    def remove(self, list, unlink=False):
        """Schedule files for removal at the next commit.

        If 'unlink' is true, the working copies are deleted first;
        files that still exist afterwards are reported and skipped.
        Added-but-uncommitted files are simply forgotten.
        """
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    # already gone is fine; anything else propagates
                    if inst.errno != errno.ENOENT:
                        raise
        wlock = self.wlock()
        try:
            for f in list:
                if unlink and os.path.exists(self.wjoin(f)):
                    # the unlink above did not take effect
                    self.ui.warn(_("%s still exists!\n") % f)
                elif self.dirstate[f] == 'a':
                    # never committed: just drop the pending add
                    self.dirstate.forget(f)
                elif f not in self.dirstate:
                    self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    self.dirstate.remove(f)
        finally:
            wlock.release()
1146 1160
1147 1161 def undelete(self, list):
1148 1162 manifests = [self.manifest.read(self.changelog.read(p)[0])
1149 1163 for p in self.dirstate.parents() if p != nullid]
1150 1164 wlock = self.wlock()
1151 1165 try:
1152 1166 for f in list:
1153 1167 if self.dirstate[f] != 'r':
1154 1168 self.ui.warn(_("%s not removed!\n") % f)
1155 1169 else:
1156 1170 m = f in manifests[0] and manifests[0] or manifests[1]
1157 1171 t = self.file(f).read(m[f])
1158 1172 self.wwrite(f, t, m.flags(f))
1159 1173 self.dirstate.normal(f)
1160 1174 finally:
1161 1175 wlock.release()
1162 1176
1163 1177 def copy(self, source, dest):
1164 1178 p = self.wjoin(dest)
1165 1179 if not (os.path.exists(p) or os.path.islink(p)):
1166 1180 self.ui.warn(_("%s does not exist!\n") % dest)
1167 1181 elif not (os.path.isfile(p) or os.path.islink(p)):
1168 1182 self.ui.warn(_("copy failed: %s is not a file or a "
1169 1183 "symbolic link\n") % dest)
1170 1184 else:
1171 1185 wlock = self.wlock()
1172 1186 try:
1173 1187 if self.dirstate[dest] in '?r':
1174 1188 self.dirstate.add(dest)
1175 1189 self.dirstate.copy(source, dest)
1176 1190 finally:
1177 1191 wlock.release()
1178 1192
1179 1193 def heads(self, start=None):
1180 1194 heads = self.changelog.heads(start)
1181 1195 # sort the output in rev descending order
1182 1196 heads = [(-self.changelog.rev(h), h) for h in heads]
1183 1197 return [n for (r, n) in sorted(heads)]
1184 1198
    def branchheads(self, branch=None, start=None, closed=False):
        """Return the heads of the named branch (default: the working
        directory's branch), ordered tip-most first.

        When 'start' is given, only heads reachable from it are kept.
        Unless 'closed' is true, branch-closing heads are filtered out.
        Returns [] for an unknown branch.
        """
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        bheads = branches[branch]
        # the cache returns heads ordered lowest to highest
        # NOTE(review): this reverses the list obtained from branchmap()
        # in place -- confirm branchmap() hands out a copy rather than
        # the cached list itself
        bheads.reverse()
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            bheads = self.changelog.nodesbetween([start], bheads)[2]
        if not closed:
            # extra field 5 of a changelog entry carries 'close' for
            # branch-closing changesets
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads
1201 1215
1202 1216 def branches(self, nodes):
1203 1217 if not nodes:
1204 1218 nodes = [self.changelog.tip()]
1205 1219 b = []
1206 1220 for n in nodes:
1207 1221 t = n
1208 1222 while 1:
1209 1223 p = self.changelog.parents(n)
1210 1224 if p[1] != nullid or p[0] == nullid:
1211 1225 b.append((t, n, p[0], p[1]))
1212 1226 break
1213 1227 n = p[0]
1214 1228 return b
1215 1229
1216 1230 def between(self, pairs):
1217 1231 r = []
1218 1232
1219 1233 for top, bottom in pairs:
1220 1234 n, l, i = top, [], 0
1221 1235 f = 1
1222 1236
1223 1237 while n != bottom and n != nullid:
1224 1238 p = self.changelog.parents(n)[0]
1225 1239 if i == f:
1226 1240 l.append(n)
1227 1241 f = f * 2
1228 1242 n = p
1229 1243 i += 1
1230 1244
1231 1245 r.append(l)
1232 1246
1233 1247 return r
1234 1248
1235 1249 def findincoming(self, remote, base=None, heads=None, force=False):
1236 1250 """Return list of roots of the subsets of missing nodes from remote
1237 1251
1238 1252 If base dict is specified, assume that these nodes and their parents
1239 1253 exist on the remote side and that no child of a node of base exists
1240 1254 in both remote and self.
1241 1255 Furthermore base will be updated to include the nodes that exists
1242 1256 in self and remote but no children exists in self and remote.
1243 1257 If a list of heads is specified, return only nodes which are heads
1244 1258 or ancestors of these heads.
1245 1259
1246 1260 All the ancestors of base are in self and in remote.
1247 1261 All the descendants of the list returned are missing in self.
1248 1262 (and so we know that the rest of the nodes are missing in remote, see
1249 1263 outgoing)
1250 1264 """
1251 1265 return self.findcommonincoming(remote, base, heads, force)[1]
1252 1266
    def findcommonincoming(self, remote, base=None, heads=None, force=False):
        """Return a tuple (common, missing roots, heads) used to identify
        missing nodes from remote.

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exists
        in self and remote but no children exists in self and remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        """
        m = self.changelog.nodemap
        search = []
        fetch = set()
        seen = set()
        seenbranch = set()
        if base is None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            # local repo is empty: everything the remote has is missing
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid], [nullid], list(heads)
            return [nullid], [], []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        heads = unknown
        if not unknown:
            # every remote head is already known locally
            return base.keys(), [], []

        req = set(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n[0:2]) # schedule branch range for scanning
                    seenbranch.add(n)
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch.add(n[1]) # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    # queue still-unknown parents for the next request
                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req.add(p)
                seen.add(n[0])

            if r:
                # ask the remote to expand the queued unknown nodes,
                # ten at a time
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                            (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            newsearch = []
            reqcnt += 1
            for n, l in zip(search, remote.between(search)):
                l.append(n[1])
                p = n[0]
                f = 1
                for i in l:
                    self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                    if i in m:
                        if f <= 2:
                            # adjacent samples: the boundary is found
                            self.ui.debug(_("found new branch changeset %s\n") %
                                              short(p))
                            fetch.add(p)
                            base[i] = 1
                        else:
                            self.ui.debug(_("narrowed branch search to %s:%s\n")
                                          % (short(p), short(i)))
                            newsearch.append((p, i))
                        break
                    p, f = i, f * 2
            search = newsearch

        # sanity check our fetch list
        for f in fetch:
            if f in m:
                # NOTE(review): short(f[:4]) truncates the binary node
                # before abbreviating; short(f) looks intended -- confirm
                raise error.RepoError(_("already have changeset ")
                                      + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return base.keys(), list(fetch), heads
1394 1408
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base is None:
            # no common-node hint supplied: compute it via discovery
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = set(self.changelog.nodemap)

        # prune everything remote has from the tree
        remain.remove(nullid)
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                remain.remove(n)
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = set()
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads.add(p1)
                if p2 in heads:
                    updated_heads.add(p2)

        # this is the set of all roots we have to push
        if heads:
            return subset, list(updated_heads)
        else:
            return subset
1442 1456
    def pull(self, remote, heads=None, force=False):
        """Pull changesets from 'remote' into this repository.

        'heads' restricts the pull to the given remote heads; 'force'
        allows pulling from an unrelated repository.  Returns the
        result of addchangegroup(), or 0 when nothing was pulled.
        """
        lock = self.lock()
        try:
            common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
                                                            force=force)
            if fetch == [nullid]:
                # remote shares no history with us
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None and remote.capable('changegroupsubset'):
                heads = rheads

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                # a partial pull needs server-side subset support
                if not remote.capable('changegroupsubset'):
                    raise util.Abort(_("Partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            lock.release()
1469 1483
1470 1484 def push(self, remote, force=False, revs=None):
1471 1485 # there are two ways to push to remote repo:
1472 1486 #
1473 1487 # addchangegroup assumes local user can lock remote
1474 1488 # repo (local filesystem, old ssh servers).
1475 1489 #
1476 1490 # unbundle assumes local user cannot lock remote repo (new ssh
1477 1491 # servers, http servers).
1478 1492
1479 1493 if remote.capable('unbundle'):
1480 1494 return self.push_unbundle(remote, force, revs)
1481 1495 return self.push_addchangegroup(remote, force, revs)
1482 1496
    def prepush(self, remote, force, revs):
        """Compute the changegroup to push to 'remote'.

        Returns (changegroup, remote_heads) on success, or
        (None, status) when the push must not happen: nothing to push,
        or it would create new remote heads/branches without 'force'.
        'revs' limits the push to the given local revisions.
        """
        common = {}
        remote_heads = remote.heads()
        # fills 'common' with the shared nodes; truthy when the remote
        # has changes we do not
        inc = self.findincoming(remote, common, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, common, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        def checkbranch(lheads, rheads, updatelh):
            '''
            check whether there are more local heads than remote heads on
            a specific branch.

            lheads: local branch heads
            rheads: remote branch heads
            updatelh: outgoing local branch heads
            '''

            warn = 0

            if not revs and len(lheads) > len(rheads):
                warn = 1
            else:
                # heads the push would add on this branch
                updatelheads = [self.changelog.heads(x, lheads)
                                for x in updatelh]
                newheads = set(sum(updatelheads, [])) & set(lheads)

                if not newheads:
                    return True

                # count remote heads that survive the push: those unknown
                # locally, or with no outgoing descendant
                for r in rheads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.add(r)
                    else:
                        newheads.add(r)
                if len(newheads) > len(rheads):
                    warn = 1

            if warn:
                if not rheads: # new branch requires --force
                    self.ui.warn(_("abort: push creates new"
                                   " remote branch '%s'!\n") %
                                   self[updatelh[0]].branch())
                else:
                    self.ui.warn(_("abort: push creates new remote heads!\n"))

                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return False
            return True

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # Check for each named branch if we're creating new remote heads.
            # To be a remote head after push, node must be either:
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head
            #
            # New named branches cannot be created without --force.

            if remote_heads != [nullid]:
                if remote.capable('branchmap'):
                    localhds = {}
                    if not revs:
                        localhds = self.branchmap()
                    else:
                        # group the outgoing heads by named branch
                        for n in heads:
                            branch = self[n].branch()
                            if branch in localhds:
                                localhds[branch].append(n)
                            else:
                                localhds[branch] = [n]

                    remotehds = remote.branchmap()

                    for lh in localhds:
                        if lh in remotehds:
                            rheads = remotehds[lh]
                        else:
                            rheads = []
                        lheads = localhds[lh]
                        updatelh = [upd for upd in update
                                    if self[upd].branch() == lh]
                        if not updatelh:
                            continue
                        if not checkbranch(lheads, rheads, updatelh):
                            return None, 0
                else:
                    # no branchmap support: one global head check
                    if not checkbranch(heads, remote_heads, update):
                        return None, 0

        if inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))


        if revs is None:
            # use the fast path, no race possible on push
            cg = self._changegroup(common.keys(), 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
1594 1608
1595 1609 def push_addchangegroup(self, remote, force, revs):
1596 1610 lock = remote.lock()
1597 1611 try:
1598 1612 ret = self.prepush(remote, force, revs)
1599 1613 if ret[0] is not None:
1600 1614 cg, remote_heads = ret
1601 1615 return remote.addchangegroup(cg, 'push', self.url())
1602 1616 return ret[1]
1603 1617 finally:
1604 1618 lock.release()
1605 1619
1606 1620 def push_unbundle(self, remote, force, revs):
1607 1621 # local repo finds heads on server, finds out what revs it
1608 1622 # must push. once revs transferred, if server finds it has
1609 1623 # different heads (someone else won commit/push race), server
1610 1624 # aborts.
1611 1625
1612 1626 ret = self.prepush(remote, force, revs)
1613 1627 if ret[0] is not None:
1614 1628 cg, remote_heads = ret
1615 1629 if force: remote_heads = ['force']
1616 1630 return remote.unbundle(cg, remote_heads, 'push')
1617 1631 return ret[1]
1618 1632
1619 1633 def changegroupinfo(self, nodes, source):
1620 1634 if self.ui.verbose or source == 'bundle':
1621 1635 self.ui.status(_("%d changesets found\n") % len(nodes))
1622 1636 if self.ui.debugflag:
1623 1637 self.ui.debug(_("list of changesets:\n"))
1624 1638 for node in nodes:
1625 1639 self.ui.debug("%s\n" % hex(node))
1626 1640
    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """This function generates a changegroup consisting of all the nodes
        that are descendants of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument.  It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.

        Returns a util.chunkbuffer wrapping the generated chunk stream.
        """

        if extranodes is None:
            # can we go through the fast path ?
            heads.sort()
            allheads = self.heads()
            allheads.sort()
            if heads == allheads:
                common = []
                # parents of bases are known from both sides
                for n in bases:
                    for p in self.changelog.parents(n):
                        if p != nullid:
                            common.append(p)
                return self._changegroup(common, source)

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst, source)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = set()
        # We assume that all parents of bases are known heads.
        for n in bases:
            knownheads.update(cl.parents(n))
        knownheads.discard(nullid)
        knownheads = list(knownheads)
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into a set.
            has_cl_set = set(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = set()

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = list(hasset)
            haslst.sort(key=revlog.rev)
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset.add(n)
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = set()
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
                if linknode in has_cl_set:
                    has_mnfst_set.add(n)
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            # next_rev is a one-element list so the closure can mutate it
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming that a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.iteritems():
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, lets remove
        # all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = set()
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
                if clnode in has_cl_set:
                    hasset.add(n)
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(key=mnfst.rev)
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    # key 1 is the manifest, handled above; skip it here
                    if isinstance(fname, int):
                        continue
                    msng_filenode_set.setdefault(fname, {})
                    changedfiles[fname] = 1
            # Go through all our files in order sorted by name.
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if fname in msng_filenode_set:
                    prune_filenodes(fname, filerevlog)
                    add_extra_nodes(fname, msng_filenode_set[fname])
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(key=filerevlog.rev)
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if fname in msng_filenode_set:
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
1923 1937
1924 1938 def changegroup(self, basenodes, source):
1925 1939 # to avoid a race we use changegroupsubset() (issue1320)
1926 1940 return self.changegroupsubset(basenodes, self.heads(), source)
1927 1941
    def _changegroup(self, common, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        common is the set of common nodes between remote and self

        Returns a util.chunkbuffer wrapping the generated chunk stream.
        """

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        # everything the recipient is missing, plus the set of local revision
        # numbers those changesets occupy (used to filter the other revlogs)
        nodes = cl.findmissing(common)
        revset = set([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        # A changeset's group-lookup node is itself.
        def identity(x):
            return x

        # Yield the nodes of a revlog whose linkrev points at an outgoing
        # changeset.
        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

        # Callback factory: accumulate the files touched by each outgoing
        # changeset into changedfileset as the changelog group is generated.
        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                changedfileset.update(c[3])
            return collect_changed_files

        # Callback factory: map a revlog node back to the changelog node
        # that should be transmitted as its linkrev.
        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(revlog.rev(n)))
            return lookuprevlink

        # Emit changelog chunks, then manifest chunks, then per-file chunks,
        # then the closing chunk.
        def gengroup():
            # construct a list of all changed files
            changedfiles = set()

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    # a file group is preceded by its (length-prefixed) name
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1995 2009
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        source is a chunk stream; srctype (e.g. 'push', 'pull') and url are
        passed through to the hooks that fire around the operation.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - less heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        # linkrev callback for the changelog: each incoming changeset links
        # to its own (next) revision; also logs the node in debug mode
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return len(cl)

        # linkrev callback for manifests/filelogs: map a changelog node to
        # its local revision number
        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                # an empty chunk marks the end of the file groups
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = len(fl)
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1

            newheads = len(cl.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                             % (changesets, revisions, files, heads))

            if changesets > 0:
                # lazily write pending changelog data so the hook can see
                # the incoming changesets before the transaction commits
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            # break the reference cycle so the transaction destructor runs
            del tr

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug(_("updating the branch cache\n"))
            self.branchtags()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
2099 2113
2100 2114
    def stream_in(self, remote):
        """Clone by streaming raw store files from the remote.

        Protocol: a status line (0 = ok, 1 = forbidden, 2 = remote lock
        failed), then "<total_files> <total_bytes>", then for each file a
        "<name>\\0<size>" header followed by exactly size bytes of data.

        Returns len(self.heads()) + 1, mirroring pull()'s head-count
        convention.  Raises util.Abort or error.ResponseError on failure.
        """
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
            # for backwards compat, name was partially encoded
            ofp = self.sopener(store.decodedir(name), 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            # avoid division by zero in the transfer-rate report
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.invalidate()
        return len(self.heads()) + 1
2148 2162
2149 2163 def clone(self, remote, heads=[], stream=False):
2150 2164 '''clone remote repository.
2151 2165
2152 2166 keyword arguments:
2153 2167 heads: list of revs to clone (forces use of pull)
2154 2168 stream: use streaming clone if possible'''
2155 2169
2156 2170 # now, all clients that can request uncompressed clones can
2157 2171 # read repo formats supported by all servers that can serve
2158 2172 # them.
2159 2173
2160 2174 # if revlog format changes, client will have to check version
2161 2175 # and format flags on "stream" capability, and use
2162 2176 # uncompressed only if compatible.
2163 2177
2164 2178 if stream and not heads and remote.capable('stream'):
2165 2179 return self.stream_in(remote)
2166 2180 return self.pull(remote, heads)
2167 2181
2168 2182 # used to avoid circular references so destructors work
2169 2183 def aftertrans(files):
2170 2184 renamefiles = [tuple(t) for t in files]
2171 2185 def a():
2172 2186 for src, dest in renamefiles:
2173 2187 util.rename(src, dest)
2174 2188 return a
2175 2189
def instance(ui, path, create):
    """Open (or create) a localrepository at path, stripping any
    'file:' scheme prefix first."""
    localpath = util.drop_scheme('file', path)
    return localrepository(ui, localpath, create)
2178 2192
def islocal(path):
    """Every repository served by this module is local."""
    return True
General Comments 0
You need to be logged in to leave comments. Login now