##// END OF EJS Templates
phases: store phase values in constant instead of using raw integer...
Pierre-Yves David -
r15818:57241845 default
parent child Browse files
Show More
@@ -1,1166 +1,1166
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import nullid, nullrev, short, hex
8 from node import nullid, nullrev, short, hex
9 from i18n import _
9 from i18n import _
10 import ancestor, mdiff, error, util, scmutil, subrepo, patch, encoding
10 import ancestor, mdiff, error, util, scmutil, subrepo, patch, encoding, phases
11 import match as matchmod
11 import match as matchmod
12 import os, errno, stat
12 import os, errno, stat
13
13
14 propertycache = util.propertycache
14 propertycache = util.propertycache
15
15
16 class changectx(object):
16 class changectx(object):
17 """A changecontext object makes access to data related to a particular
17 """A changecontext object makes access to data related to a particular
18 changeset convenient."""
18 changeset convenient."""
19 def __init__(self, repo, changeid=''):
19 def __init__(self, repo, changeid=''):
20 """changeid is a revision number, node, or tag"""
20 """changeid is a revision number, node, or tag"""
21 if changeid == '':
21 if changeid == '':
22 changeid = '.'
22 changeid = '.'
23 self._repo = repo
23 self._repo = repo
24 if isinstance(changeid, (long, int)):
24 if isinstance(changeid, (long, int)):
25 self._rev = changeid
25 self._rev = changeid
26 self._node = self._repo.changelog.node(changeid)
26 self._node = self._repo.changelog.node(changeid)
27 else:
27 else:
28 self._node = self._repo.lookup(changeid)
28 self._node = self._repo.lookup(changeid)
29 self._rev = self._repo.changelog.rev(self._node)
29 self._rev = self._repo.changelog.rev(self._node)
30
30
31 def __str__(self):
31 def __str__(self):
32 return short(self.node())
32 return short(self.node())
33
33
34 def __int__(self):
34 def __int__(self):
35 return self.rev()
35 return self.rev()
36
36
37 def __repr__(self):
37 def __repr__(self):
38 return "<changectx %s>" % str(self)
38 return "<changectx %s>" % str(self)
39
39
40 def __hash__(self):
40 def __hash__(self):
41 try:
41 try:
42 return hash(self._rev)
42 return hash(self._rev)
43 except AttributeError:
43 except AttributeError:
44 return id(self)
44 return id(self)
45
45
46 def __eq__(self, other):
46 def __eq__(self, other):
47 try:
47 try:
48 return self._rev == other._rev
48 return self._rev == other._rev
49 except AttributeError:
49 except AttributeError:
50 return False
50 return False
51
51
52 def __ne__(self, other):
52 def __ne__(self, other):
53 return not (self == other)
53 return not (self == other)
54
54
55 def __nonzero__(self):
55 def __nonzero__(self):
56 return self._rev != nullrev
56 return self._rev != nullrev
57
57
58 @propertycache
58 @propertycache
59 def _changeset(self):
59 def _changeset(self):
60 return self._repo.changelog.read(self.node())
60 return self._repo.changelog.read(self.node())
61
61
62 @propertycache
62 @propertycache
63 def _manifest(self):
63 def _manifest(self):
64 return self._repo.manifest.read(self._changeset[0])
64 return self._repo.manifest.read(self._changeset[0])
65
65
66 @propertycache
66 @propertycache
67 def _manifestdelta(self):
67 def _manifestdelta(self):
68 return self._repo.manifest.readdelta(self._changeset[0])
68 return self._repo.manifest.readdelta(self._changeset[0])
69
69
70 @propertycache
70 @propertycache
71 def _parents(self):
71 def _parents(self):
72 p = self._repo.changelog.parentrevs(self._rev)
72 p = self._repo.changelog.parentrevs(self._rev)
73 if p[1] == nullrev:
73 if p[1] == nullrev:
74 p = p[:-1]
74 p = p[:-1]
75 return [changectx(self._repo, x) for x in p]
75 return [changectx(self._repo, x) for x in p]
76
76
77 @propertycache
77 @propertycache
78 def substate(self):
78 def substate(self):
79 return subrepo.state(self, self._repo.ui)
79 return subrepo.state(self, self._repo.ui)
80
80
81 def __contains__(self, key):
81 def __contains__(self, key):
82 return key in self._manifest
82 return key in self._manifest
83
83
84 def __getitem__(self, key):
84 def __getitem__(self, key):
85 return self.filectx(key)
85 return self.filectx(key)
86
86
87 def __iter__(self):
87 def __iter__(self):
88 for f in sorted(self._manifest):
88 for f in sorted(self._manifest):
89 yield f
89 yield f
90
90
91 def changeset(self):
91 def changeset(self):
92 return self._changeset
92 return self._changeset
93 def manifest(self):
93 def manifest(self):
94 return self._manifest
94 return self._manifest
95 def manifestnode(self):
95 def manifestnode(self):
96 return self._changeset[0]
96 return self._changeset[0]
97
97
98 def rev(self):
98 def rev(self):
99 return self._rev
99 return self._rev
100 def node(self):
100 def node(self):
101 return self._node
101 return self._node
102 def hex(self):
102 def hex(self):
103 return hex(self._node)
103 return hex(self._node)
104 def user(self):
104 def user(self):
105 return self._changeset[1]
105 return self._changeset[1]
106 def date(self):
106 def date(self):
107 return self._changeset[2]
107 return self._changeset[2]
108 def files(self):
108 def files(self):
109 return self._changeset[3]
109 return self._changeset[3]
110 def description(self):
110 def description(self):
111 return self._changeset[4]
111 return self._changeset[4]
112 def branch(self):
112 def branch(self):
113 return encoding.tolocal(self._changeset[5].get("branch"))
113 return encoding.tolocal(self._changeset[5].get("branch"))
114 def extra(self):
114 def extra(self):
115 return self._changeset[5]
115 return self._changeset[5]
116 def tags(self):
116 def tags(self):
117 return self._repo.nodetags(self._node)
117 return self._repo.nodetags(self._node)
118 def bookmarks(self):
118 def bookmarks(self):
119 return self._repo.nodebookmarks(self._node)
119 return self._repo.nodebookmarks(self._node)
120 def phase(self):
120 def phase(self):
121 if self._rev == -1:
121 if self._rev == -1:
122 return 0
122 return phases.public
123 if self._rev >= len(self._repo._phaserev):
123 if self._rev >= len(self._repo._phaserev):
124 # outdated cache
124 # outdated cache
125 del self._repo._phaserev
125 del self._repo._phaserev
126 return self._repo._phaserev[self._rev]
126 return self._repo._phaserev[self._rev]
127 def mutable(self):
127 def mutable(self):
128 return self._repo._phaserev[self._rev] > 0
128 return self._repo._phaserev[self._rev] > phases.public
129 def hidden(self):
129 def hidden(self):
130 return self._rev in self._repo.changelog.hiddenrevs
130 return self._rev in self._repo.changelog.hiddenrevs
131
131
132 def parents(self):
132 def parents(self):
133 """return contexts for each parent changeset"""
133 """return contexts for each parent changeset"""
134 return self._parents
134 return self._parents
135
135
136 def p1(self):
136 def p1(self):
137 return self._parents[0]
137 return self._parents[0]
138
138
139 def p2(self):
139 def p2(self):
140 if len(self._parents) == 2:
140 if len(self._parents) == 2:
141 return self._parents[1]
141 return self._parents[1]
142 return changectx(self._repo, -1)
142 return changectx(self._repo, -1)
143
143
144 def children(self):
144 def children(self):
145 """return contexts for each child changeset"""
145 """return contexts for each child changeset"""
146 c = self._repo.changelog.children(self._node)
146 c = self._repo.changelog.children(self._node)
147 return [changectx(self._repo, x) for x in c]
147 return [changectx(self._repo, x) for x in c]
148
148
149 def ancestors(self):
149 def ancestors(self):
150 for a in self._repo.changelog.ancestors(self._rev):
150 for a in self._repo.changelog.ancestors(self._rev):
151 yield changectx(self._repo, a)
151 yield changectx(self._repo, a)
152
152
153 def descendants(self):
153 def descendants(self):
154 for d in self._repo.changelog.descendants(self._rev):
154 for d in self._repo.changelog.descendants(self._rev):
155 yield changectx(self._repo, d)
155 yield changectx(self._repo, d)
156
156
157 def _fileinfo(self, path):
157 def _fileinfo(self, path):
158 if '_manifest' in self.__dict__:
158 if '_manifest' in self.__dict__:
159 try:
159 try:
160 return self._manifest[path], self._manifest.flags(path)
160 return self._manifest[path], self._manifest.flags(path)
161 except KeyError:
161 except KeyError:
162 raise error.LookupError(self._node, path,
162 raise error.LookupError(self._node, path,
163 _('not found in manifest'))
163 _('not found in manifest'))
164 if '_manifestdelta' in self.__dict__ or path in self.files():
164 if '_manifestdelta' in self.__dict__ or path in self.files():
165 if path in self._manifestdelta:
165 if path in self._manifestdelta:
166 return self._manifestdelta[path], self._manifestdelta.flags(path)
166 return self._manifestdelta[path], self._manifestdelta.flags(path)
167 node, flag = self._repo.manifest.find(self._changeset[0], path)
167 node, flag = self._repo.manifest.find(self._changeset[0], path)
168 if not node:
168 if not node:
169 raise error.LookupError(self._node, path,
169 raise error.LookupError(self._node, path,
170 _('not found in manifest'))
170 _('not found in manifest'))
171
171
172 return node, flag
172 return node, flag
173
173
174 def filenode(self, path):
174 def filenode(self, path):
175 return self._fileinfo(path)[0]
175 return self._fileinfo(path)[0]
176
176
177 def flags(self, path):
177 def flags(self, path):
178 try:
178 try:
179 return self._fileinfo(path)[1]
179 return self._fileinfo(path)[1]
180 except error.LookupError:
180 except error.LookupError:
181 return ''
181 return ''
182
182
183 def filectx(self, path, fileid=None, filelog=None):
183 def filectx(self, path, fileid=None, filelog=None):
184 """get a file context from this changeset"""
184 """get a file context from this changeset"""
185 if fileid is None:
185 if fileid is None:
186 fileid = self.filenode(path)
186 fileid = self.filenode(path)
187 return filectx(self._repo, path, fileid=fileid,
187 return filectx(self._repo, path, fileid=fileid,
188 changectx=self, filelog=filelog)
188 changectx=self, filelog=filelog)
189
189
190 def ancestor(self, c2):
190 def ancestor(self, c2):
191 """
191 """
192 return the ancestor context of self and c2
192 return the ancestor context of self and c2
193 """
193 """
194 # deal with workingctxs
194 # deal with workingctxs
195 n2 = c2._node
195 n2 = c2._node
196 if n2 is None:
196 if n2 is None:
197 n2 = c2._parents[0]._node
197 n2 = c2._parents[0]._node
198 n = self._repo.changelog.ancestor(self._node, n2)
198 n = self._repo.changelog.ancestor(self._node, n2)
199 return changectx(self._repo, n)
199 return changectx(self._repo, n)
200
200
201 def walk(self, match):
201 def walk(self, match):
202 fset = set(match.files())
202 fset = set(match.files())
203 # for dirstate.walk, files=['.'] means "walk the whole tree".
203 # for dirstate.walk, files=['.'] means "walk the whole tree".
204 # follow that here, too
204 # follow that here, too
205 fset.discard('.')
205 fset.discard('.')
206 for fn in self:
206 for fn in self:
207 for ffn in fset:
207 for ffn in fset:
208 # match if the file is the exact name or a directory
208 # match if the file is the exact name or a directory
209 if ffn == fn or fn.startswith("%s/" % ffn):
209 if ffn == fn or fn.startswith("%s/" % ffn):
210 fset.remove(ffn)
210 fset.remove(ffn)
211 break
211 break
212 if match(fn):
212 if match(fn):
213 yield fn
213 yield fn
214 for fn in sorted(fset):
214 for fn in sorted(fset):
215 if match.bad(fn, _('no such file in rev %s') % self) and match(fn):
215 if match.bad(fn, _('no such file in rev %s') % self) and match(fn):
216 yield fn
216 yield fn
217
217
218 def sub(self, path):
218 def sub(self, path):
219 return subrepo.subrepo(self, path)
219 return subrepo.subrepo(self, path)
220
220
221 def match(self, pats=[], include=None, exclude=None, default='glob'):
221 def match(self, pats=[], include=None, exclude=None, default='glob'):
222 r = self._repo
222 r = self._repo
223 return matchmod.match(r.root, r.getcwd(), pats,
223 return matchmod.match(r.root, r.getcwd(), pats,
224 include, exclude, default,
224 include, exclude, default,
225 auditor=r.auditor, ctx=self)
225 auditor=r.auditor, ctx=self)
226
226
227 def diff(self, ctx2=None, match=None, **opts):
227 def diff(self, ctx2=None, match=None, **opts):
228 """Returns a diff generator for the given contexts and matcher"""
228 """Returns a diff generator for the given contexts and matcher"""
229 if ctx2 is None:
229 if ctx2 is None:
230 ctx2 = self.p1()
230 ctx2 = self.p1()
231 if ctx2 is not None and not isinstance(ctx2, changectx):
231 if ctx2 is not None and not isinstance(ctx2, changectx):
232 ctx2 = self._repo[ctx2]
232 ctx2 = self._repo[ctx2]
233 diffopts = patch.diffopts(self._repo.ui, opts)
233 diffopts = patch.diffopts(self._repo.ui, opts)
234 return patch.diff(self._repo, ctx2.node(), self.node(),
234 return patch.diff(self._repo, ctx2.node(), self.node(),
235 match=match, opts=diffopts)
235 match=match, opts=diffopts)
236
236
237 class filectx(object):
237 class filectx(object):
238 """A filecontext object makes access to data related to a particular
238 """A filecontext object makes access to data related to a particular
239 filerevision convenient."""
239 filerevision convenient."""
240 def __init__(self, repo, path, changeid=None, fileid=None,
240 def __init__(self, repo, path, changeid=None, fileid=None,
241 filelog=None, changectx=None):
241 filelog=None, changectx=None):
242 """changeid can be a changeset revision, node, or tag.
242 """changeid can be a changeset revision, node, or tag.
243 fileid can be a file revision or node."""
243 fileid can be a file revision or node."""
244 self._repo = repo
244 self._repo = repo
245 self._path = path
245 self._path = path
246
246
247 assert (changeid is not None
247 assert (changeid is not None
248 or fileid is not None
248 or fileid is not None
249 or changectx is not None), \
249 or changectx is not None), \
250 ("bad args: changeid=%r, fileid=%r, changectx=%r"
250 ("bad args: changeid=%r, fileid=%r, changectx=%r"
251 % (changeid, fileid, changectx))
251 % (changeid, fileid, changectx))
252
252
253 if filelog:
253 if filelog:
254 self._filelog = filelog
254 self._filelog = filelog
255
255
256 if changeid is not None:
256 if changeid is not None:
257 self._changeid = changeid
257 self._changeid = changeid
258 if changectx is not None:
258 if changectx is not None:
259 self._changectx = changectx
259 self._changectx = changectx
260 if fileid is not None:
260 if fileid is not None:
261 self._fileid = fileid
261 self._fileid = fileid
262
262
263 @propertycache
263 @propertycache
264 def _changectx(self):
264 def _changectx(self):
265 return changectx(self._repo, self._changeid)
265 return changectx(self._repo, self._changeid)
266
266
267 @propertycache
267 @propertycache
268 def _filelog(self):
268 def _filelog(self):
269 return self._repo.file(self._path)
269 return self._repo.file(self._path)
270
270
271 @propertycache
271 @propertycache
272 def _changeid(self):
272 def _changeid(self):
273 if '_changectx' in self.__dict__:
273 if '_changectx' in self.__dict__:
274 return self._changectx.rev()
274 return self._changectx.rev()
275 else:
275 else:
276 return self._filelog.linkrev(self._filerev)
276 return self._filelog.linkrev(self._filerev)
277
277
278 @propertycache
278 @propertycache
279 def _filenode(self):
279 def _filenode(self):
280 if '_fileid' in self.__dict__:
280 if '_fileid' in self.__dict__:
281 return self._filelog.lookup(self._fileid)
281 return self._filelog.lookup(self._fileid)
282 else:
282 else:
283 return self._changectx.filenode(self._path)
283 return self._changectx.filenode(self._path)
284
284
285 @propertycache
285 @propertycache
286 def _filerev(self):
286 def _filerev(self):
287 return self._filelog.rev(self._filenode)
287 return self._filelog.rev(self._filenode)
288
288
289 @propertycache
289 @propertycache
290 def _repopath(self):
290 def _repopath(self):
291 return self._path
291 return self._path
292
292
293 def __nonzero__(self):
293 def __nonzero__(self):
294 try:
294 try:
295 self._filenode
295 self._filenode
296 return True
296 return True
297 except error.LookupError:
297 except error.LookupError:
298 # file is missing
298 # file is missing
299 return False
299 return False
300
300
301 def __str__(self):
301 def __str__(self):
302 return "%s@%s" % (self.path(), short(self.node()))
302 return "%s@%s" % (self.path(), short(self.node()))
303
303
304 def __repr__(self):
304 def __repr__(self):
305 return "<filectx %s>" % str(self)
305 return "<filectx %s>" % str(self)
306
306
307 def __hash__(self):
307 def __hash__(self):
308 try:
308 try:
309 return hash((self._path, self._filenode))
309 return hash((self._path, self._filenode))
310 except AttributeError:
310 except AttributeError:
311 return id(self)
311 return id(self)
312
312
313 def __eq__(self, other):
313 def __eq__(self, other):
314 try:
314 try:
315 return (self._path == other._path
315 return (self._path == other._path
316 and self._filenode == other._filenode)
316 and self._filenode == other._filenode)
317 except AttributeError:
317 except AttributeError:
318 return False
318 return False
319
319
320 def __ne__(self, other):
320 def __ne__(self, other):
321 return not (self == other)
321 return not (self == other)
322
322
323 def filectx(self, fileid):
323 def filectx(self, fileid):
324 '''opens an arbitrary revision of the file without
324 '''opens an arbitrary revision of the file without
325 opening a new filelog'''
325 opening a new filelog'''
326 return filectx(self._repo, self._path, fileid=fileid,
326 return filectx(self._repo, self._path, fileid=fileid,
327 filelog=self._filelog)
327 filelog=self._filelog)
328
328
329 def filerev(self):
329 def filerev(self):
330 return self._filerev
330 return self._filerev
331 def filenode(self):
331 def filenode(self):
332 return self._filenode
332 return self._filenode
333 def flags(self):
333 def flags(self):
334 return self._changectx.flags(self._path)
334 return self._changectx.flags(self._path)
335 def filelog(self):
335 def filelog(self):
336 return self._filelog
336 return self._filelog
337
337
338 def rev(self):
338 def rev(self):
339 if '_changectx' in self.__dict__:
339 if '_changectx' in self.__dict__:
340 return self._changectx.rev()
340 return self._changectx.rev()
341 if '_changeid' in self.__dict__:
341 if '_changeid' in self.__dict__:
342 return self._changectx.rev()
342 return self._changectx.rev()
343 return self._filelog.linkrev(self._filerev)
343 return self._filelog.linkrev(self._filerev)
344
344
345 def linkrev(self):
345 def linkrev(self):
346 return self._filelog.linkrev(self._filerev)
346 return self._filelog.linkrev(self._filerev)
347 def node(self):
347 def node(self):
348 return self._changectx.node()
348 return self._changectx.node()
349 def hex(self):
349 def hex(self):
350 return hex(self.node())
350 return hex(self.node())
351 def user(self):
351 def user(self):
352 return self._changectx.user()
352 return self._changectx.user()
353 def date(self):
353 def date(self):
354 return self._changectx.date()
354 return self._changectx.date()
355 def files(self):
355 def files(self):
356 return self._changectx.files()
356 return self._changectx.files()
357 def description(self):
357 def description(self):
358 return self._changectx.description()
358 return self._changectx.description()
359 def branch(self):
359 def branch(self):
360 return self._changectx.branch()
360 return self._changectx.branch()
361 def extra(self):
361 def extra(self):
362 return self._changectx.extra()
362 return self._changectx.extra()
363 def manifest(self):
363 def manifest(self):
364 return self._changectx.manifest()
364 return self._changectx.manifest()
365 def changectx(self):
365 def changectx(self):
366 return self._changectx
366 return self._changectx
367
367
368 def data(self):
368 def data(self):
369 return self._filelog.read(self._filenode)
369 return self._filelog.read(self._filenode)
370 def path(self):
370 def path(self):
371 return self._path
371 return self._path
372 def size(self):
372 def size(self):
373 return self._filelog.size(self._filerev)
373 return self._filelog.size(self._filerev)
374
374
375 def isbinary(self):
375 def isbinary(self):
376 try:
376 try:
377 return util.binary(self.data())
377 return util.binary(self.data())
378 except IOError:
378 except IOError:
379 return False
379 return False
380
380
381 def cmp(self, fctx):
381 def cmp(self, fctx):
382 """compare with other file context
382 """compare with other file context
383
383
384 returns True if different than fctx.
384 returns True if different than fctx.
385 """
385 """
386 if (fctx._filerev is None and self._repo._encodefilterpats
386 if (fctx._filerev is None and self._repo._encodefilterpats
387 or self.size() == fctx.size()):
387 or self.size() == fctx.size()):
388 return self._filelog.cmp(self._filenode, fctx.data())
388 return self._filelog.cmp(self._filenode, fctx.data())
389
389
390 return True
390 return True
391
391
392 def renamed(self):
392 def renamed(self):
393 """check if file was actually renamed in this changeset revision
393 """check if file was actually renamed in this changeset revision
394
394
395 If rename logged in file revision, we report copy for changeset only
395 If rename logged in file revision, we report copy for changeset only
396 if file revisions linkrev points back to the changeset in question
396 if file revisions linkrev points back to the changeset in question
397 or both changeset parents contain different file revisions.
397 or both changeset parents contain different file revisions.
398 """
398 """
399
399
400 renamed = self._filelog.renamed(self._filenode)
400 renamed = self._filelog.renamed(self._filenode)
401 if not renamed:
401 if not renamed:
402 return renamed
402 return renamed
403
403
404 if self.rev() == self.linkrev():
404 if self.rev() == self.linkrev():
405 return renamed
405 return renamed
406
406
407 name = self.path()
407 name = self.path()
408 fnode = self._filenode
408 fnode = self._filenode
409 for p in self._changectx.parents():
409 for p in self._changectx.parents():
410 try:
410 try:
411 if fnode == p.filenode(name):
411 if fnode == p.filenode(name):
412 return None
412 return None
413 except error.LookupError:
413 except error.LookupError:
414 pass
414 pass
415 return renamed
415 return renamed
416
416
417 def parents(self):
417 def parents(self):
418 p = self._path
418 p = self._path
419 fl = self._filelog
419 fl = self._filelog
420 pl = [(p, n, fl) for n in self._filelog.parents(self._filenode)]
420 pl = [(p, n, fl) for n in self._filelog.parents(self._filenode)]
421
421
422 r = self._filelog.renamed(self._filenode)
422 r = self._filelog.renamed(self._filenode)
423 if r:
423 if r:
424 pl[0] = (r[0], r[1], None)
424 pl[0] = (r[0], r[1], None)
425
425
426 return [filectx(self._repo, p, fileid=n, filelog=l)
426 return [filectx(self._repo, p, fileid=n, filelog=l)
427 for p, n, l in pl if n != nullid]
427 for p, n, l in pl if n != nullid]
428
428
429 def p1(self):
429 def p1(self):
430 return self.parents()[0]
430 return self.parents()[0]
431
431
432 def p2(self):
432 def p2(self):
433 p = self.parents()
433 p = self.parents()
434 if len(p) == 2:
434 if len(p) == 2:
435 return p[1]
435 return p[1]
436 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
436 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
437
437
438 def children(self):
438 def children(self):
439 # hard for renames
439 # hard for renames
440 c = self._filelog.children(self._filenode)
440 c = self._filelog.children(self._filenode)
441 return [filectx(self._repo, self._path, fileid=x,
441 return [filectx(self._repo, self._path, fileid=x,
442 filelog=self._filelog) for x in c]
442 filelog=self._filelog) for x in c]
443
443
444 def annotate(self, follow=False, linenumber=None, diffopts=None):
444 def annotate(self, follow=False, linenumber=None, diffopts=None):
445 '''returns a list of tuples of (ctx, line) for each line
445 '''returns a list of tuples of (ctx, line) for each line
446 in the file, where ctx is the filectx of the node where
446 in the file, where ctx is the filectx of the node where
447 that line was last changed.
447 that line was last changed.
448 This returns tuples of ((ctx, linenumber), line) for each line,
448 This returns tuples of ((ctx, linenumber), line) for each line,
449 if "linenumber" parameter is NOT "None".
449 if "linenumber" parameter is NOT "None".
450 In such tuples, linenumber means one at the first appearance
450 In such tuples, linenumber means one at the first appearance
451 in the managed file.
451 in the managed file.
452 To reduce annotation cost,
452 To reduce annotation cost,
453 this returns fixed value(False is used) as linenumber,
453 this returns fixed value(False is used) as linenumber,
454 if "linenumber" parameter is "False".'''
454 if "linenumber" parameter is "False".'''
455
455
456 def decorate_compat(text, rev):
456 def decorate_compat(text, rev):
457 return ([rev] * len(text.splitlines()), text)
457 return ([rev] * len(text.splitlines()), text)
458
458
459 def without_linenumber(text, rev):
459 def without_linenumber(text, rev):
460 return ([(rev, False)] * len(text.splitlines()), text)
460 return ([(rev, False)] * len(text.splitlines()), text)
461
461
462 def with_linenumber(text, rev):
462 def with_linenumber(text, rev):
463 size = len(text.splitlines())
463 size = len(text.splitlines())
464 return ([(rev, i) for i in xrange(1, size + 1)], text)
464 return ([(rev, i) for i in xrange(1, size + 1)], text)
465
465
466 decorate = (((linenumber is None) and decorate_compat) or
466 decorate = (((linenumber is None) and decorate_compat) or
467 (linenumber and with_linenumber) or
467 (linenumber and with_linenumber) or
468 without_linenumber)
468 without_linenumber)
469
469
470 def pair(parent, child):
470 def pair(parent, child):
471 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
471 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
472 refine=True)
472 refine=True)
473 for (a1, a2, b1, b2), t in blocks:
473 for (a1, a2, b1, b2), t in blocks:
474 # Changed blocks ('!') or blocks made only of blank lines ('~')
474 # Changed blocks ('!') or blocks made only of blank lines ('~')
475 # belong to the child.
475 # belong to the child.
476 if t == '=':
476 if t == '=':
477 child[0][b1:b2] = parent[0][a1:a2]
477 child[0][b1:b2] = parent[0][a1:a2]
478 return child
478 return child
479
479
480 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
480 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
481 def getctx(path, fileid):
481 def getctx(path, fileid):
482 log = path == self._path and self._filelog or getlog(path)
482 log = path == self._path and self._filelog or getlog(path)
483 return filectx(self._repo, path, fileid=fileid, filelog=log)
483 return filectx(self._repo, path, fileid=fileid, filelog=log)
484 getctx = util.lrucachefunc(getctx)
484 getctx = util.lrucachefunc(getctx)
485
485
486 def parents(f):
486 def parents(f):
487 # we want to reuse filectx objects as much as possible
487 # we want to reuse filectx objects as much as possible
488 p = f._path
488 p = f._path
489 if f._filerev is None: # working dir
489 if f._filerev is None: # working dir
490 pl = [(n.path(), n.filerev()) for n in f.parents()]
490 pl = [(n.path(), n.filerev()) for n in f.parents()]
491 else:
491 else:
492 pl = [(p, n) for n in f._filelog.parentrevs(f._filerev)]
492 pl = [(p, n) for n in f._filelog.parentrevs(f._filerev)]
493
493
494 if follow:
494 if follow:
495 r = f.renamed()
495 r = f.renamed()
496 if r:
496 if r:
497 pl[0] = (r[0], getlog(r[0]).rev(r[1]))
497 pl[0] = (r[0], getlog(r[0]).rev(r[1]))
498
498
499 return [getctx(p, n) for p, n in pl if n != nullrev]
499 return [getctx(p, n) for p, n in pl if n != nullrev]
500
500
501 # use linkrev to find the first changeset where self appeared
501 # use linkrev to find the first changeset where self appeared
502 if self.rev() != self.linkrev():
502 if self.rev() != self.linkrev():
503 base = self.filectx(self.filerev())
503 base = self.filectx(self.filerev())
504 else:
504 else:
505 base = self
505 base = self
506
506
507 # This algorithm would prefer to be recursive, but Python is a
507 # This algorithm would prefer to be recursive, but Python is a
508 # bit recursion-hostile. Instead we do an iterative
508 # bit recursion-hostile. Instead we do an iterative
509 # depth-first search.
509 # depth-first search.
510
510
511 visit = [base]
511 visit = [base]
512 hist = {}
512 hist = {}
513 pcache = {}
513 pcache = {}
514 needed = {base: 1}
514 needed = {base: 1}
515 while visit:
515 while visit:
516 f = visit[-1]
516 f = visit[-1]
517 if f not in pcache:
517 if f not in pcache:
518 pcache[f] = parents(f)
518 pcache[f] = parents(f)
519
519
520 ready = True
520 ready = True
521 pl = pcache[f]
521 pl = pcache[f]
522 for p in pl:
522 for p in pl:
523 if p not in hist:
523 if p not in hist:
524 ready = False
524 ready = False
525 visit.append(p)
525 visit.append(p)
526 needed[p] = needed.get(p, 0) + 1
526 needed[p] = needed.get(p, 0) + 1
527 if ready:
527 if ready:
528 visit.pop()
528 visit.pop()
529 curr = decorate(f.data(), f)
529 curr = decorate(f.data(), f)
530 for p in pl:
530 for p in pl:
531 curr = pair(hist[p], curr)
531 curr = pair(hist[p], curr)
532 if needed[p] == 1:
532 if needed[p] == 1:
533 del hist[p]
533 del hist[p]
534 else:
534 else:
535 needed[p] -= 1
535 needed[p] -= 1
536
536
537 hist[f] = curr
537 hist[f] = curr
538 pcache[f] = []
538 pcache[f] = []
539
539
540 return zip(hist[base][0], hist[base][1].splitlines(True))
540 return zip(hist[base][0], hist[base][1].splitlines(True))
541
541
542 def ancestor(self, fc2, actx=None):
542 def ancestor(self, fc2, actx=None):
543 """
543 """
544 find the common ancestor file context, if any, of self, and fc2
544 find the common ancestor file context, if any, of self, and fc2
545
545
546 If actx is given, it must be the changectx of the common ancestor
546 If actx is given, it must be the changectx of the common ancestor
547 of self's and fc2's respective changesets.
547 of self's and fc2's respective changesets.
548 """
548 """
549
549
550 if actx is None:
550 if actx is None:
551 actx = self.changectx().ancestor(fc2.changectx())
551 actx = self.changectx().ancestor(fc2.changectx())
552
552
553 # the trivial case: changesets are unrelated, files must be too
553 # the trivial case: changesets are unrelated, files must be too
554 if not actx:
554 if not actx:
555 return None
555 return None
556
556
557 # the easy case: no (relevant) renames
557 # the easy case: no (relevant) renames
558 if fc2.path() == self.path() and self.path() in actx:
558 if fc2.path() == self.path() and self.path() in actx:
559 return actx[self.path()]
559 return actx[self.path()]
560 acache = {}
560 acache = {}
561
561
562 # prime the ancestor cache for the working directory
562 # prime the ancestor cache for the working directory
563 for c in (self, fc2):
563 for c in (self, fc2):
564 if c._filerev is None:
564 if c._filerev is None:
565 pl = [(n.path(), n.filenode()) for n in c.parents()]
565 pl = [(n.path(), n.filenode()) for n in c.parents()]
566 acache[(c._path, None)] = pl
566 acache[(c._path, None)] = pl
567
567
568 flcache = {self._repopath:self._filelog, fc2._repopath:fc2._filelog}
568 flcache = {self._repopath:self._filelog, fc2._repopath:fc2._filelog}
569 def parents(vertex):
569 def parents(vertex):
570 if vertex in acache:
570 if vertex in acache:
571 return acache[vertex]
571 return acache[vertex]
572 f, n = vertex
572 f, n = vertex
573 if f not in flcache:
573 if f not in flcache:
574 flcache[f] = self._repo.file(f)
574 flcache[f] = self._repo.file(f)
575 fl = flcache[f]
575 fl = flcache[f]
576 pl = [(f, p) for p in fl.parents(n) if p != nullid]
576 pl = [(f, p) for p in fl.parents(n) if p != nullid]
577 re = fl.renamed(n)
577 re = fl.renamed(n)
578 if re:
578 if re:
579 pl.append(re)
579 pl.append(re)
580 acache[vertex] = pl
580 acache[vertex] = pl
581 return pl
581 return pl
582
582
583 a, b = (self._path, self._filenode), (fc2._path, fc2._filenode)
583 a, b = (self._path, self._filenode), (fc2._path, fc2._filenode)
584 v = ancestor.ancestor(a, b, parents)
584 v = ancestor.ancestor(a, b, parents)
585 if v:
585 if v:
586 f, n = v
586 f, n = v
587 return filectx(self._repo, f, fileid=n, filelog=flcache[f])
587 return filectx(self._repo, f, fileid=n, filelog=flcache[f])
588
588
589 return None
589 return None
590
590
591 def ancestors(self):
591 def ancestors(self):
592 visit = {}
592 visit = {}
593 c = self
593 c = self
594 while True:
594 while True:
595 for parent in c.parents():
595 for parent in c.parents():
596 visit[(parent.rev(), parent.node())] = parent
596 visit[(parent.rev(), parent.node())] = parent
597 if not visit:
597 if not visit:
598 break
598 break
599 c = visit.pop(max(visit))
599 c = visit.pop(max(visit))
600 yield c
600 yield c
601
601
602 class workingctx(changectx):
602 class workingctx(changectx):
603 """A workingctx object makes access to data related to
603 """A workingctx object makes access to data related to
604 the current working directory convenient.
604 the current working directory convenient.
605 date - any valid date string or (unixtime, offset), or None.
605 date - any valid date string or (unixtime, offset), or None.
606 user - username string, or None.
606 user - username string, or None.
607 extra - a dictionary of extra values, or None.
607 extra - a dictionary of extra values, or None.
608 changes - a list of file lists as returned by localrepo.status()
608 changes - a list of file lists as returned by localrepo.status()
609 or None to use the repository status.
609 or None to use the repository status.
610 """
610 """
611 def __init__(self, repo, text="", user=None, date=None, extra=None,
611 def __init__(self, repo, text="", user=None, date=None, extra=None,
612 changes=None):
612 changes=None):
613 self._repo = repo
613 self._repo = repo
614 self._rev = None
614 self._rev = None
615 self._node = None
615 self._node = None
616 self._text = text
616 self._text = text
617 if date:
617 if date:
618 self._date = util.parsedate(date)
618 self._date = util.parsedate(date)
619 if user:
619 if user:
620 self._user = user
620 self._user = user
621 if changes:
621 if changes:
622 self._status = list(changes[:4])
622 self._status = list(changes[:4])
623 self._unknown = changes[4]
623 self._unknown = changes[4]
624 self._ignored = changes[5]
624 self._ignored = changes[5]
625 self._clean = changes[6]
625 self._clean = changes[6]
626 else:
626 else:
627 self._unknown = None
627 self._unknown = None
628 self._ignored = None
628 self._ignored = None
629 self._clean = None
629 self._clean = None
630
630
631 self._extra = {}
631 self._extra = {}
632 if extra:
632 if extra:
633 self._extra = extra.copy()
633 self._extra = extra.copy()
634 if 'branch' not in self._extra:
634 if 'branch' not in self._extra:
635 try:
635 try:
636 branch = encoding.fromlocal(self._repo.dirstate.branch())
636 branch = encoding.fromlocal(self._repo.dirstate.branch())
637 except UnicodeDecodeError:
637 except UnicodeDecodeError:
638 raise util.Abort(_('branch name not in UTF-8!'))
638 raise util.Abort(_('branch name not in UTF-8!'))
639 self._extra['branch'] = branch
639 self._extra['branch'] = branch
640 if self._extra['branch'] == '':
640 if self._extra['branch'] == '':
641 self._extra['branch'] = 'default'
641 self._extra['branch'] = 'default'
642
642
643 def __str__(self):
643 def __str__(self):
644 return str(self._parents[0]) + "+"
644 return str(self._parents[0]) + "+"
645
645
646 def __repr__(self):
646 def __repr__(self):
647 return "<workingctx %s>" % str(self)
647 return "<workingctx %s>" % str(self)
648
648
649 def __nonzero__(self):
649 def __nonzero__(self):
650 return True
650 return True
651
651
652 def __contains__(self, key):
652 def __contains__(self, key):
653 return self._repo.dirstate[key] not in "?r"
653 return self._repo.dirstate[key] not in "?r"
654
654
655 def _buildflagfunc(self):
655 def _buildflagfunc(self):
656 # Create a fallback function for getting file flags when the
656 # Create a fallback function for getting file flags when the
657 # filesystem doesn't support them
657 # filesystem doesn't support them
658
658
659 copiesget = self._repo.dirstate.copies().get
659 copiesget = self._repo.dirstate.copies().get
660
660
661 if len(self._parents) < 2:
661 if len(self._parents) < 2:
662 # when we have one parent, it's easy: copy from parent
662 # when we have one parent, it's easy: copy from parent
663 man = self._parents[0].manifest()
663 man = self._parents[0].manifest()
664 def func(f):
664 def func(f):
665 f = copiesget(f, f)
665 f = copiesget(f, f)
666 return man.flags(f)
666 return man.flags(f)
667 else:
667 else:
668 # merges are tricky: we try to reconstruct the unstored
668 # merges are tricky: we try to reconstruct the unstored
669 # result from the merge (issue1802)
669 # result from the merge (issue1802)
670 p1, p2 = self._parents
670 p1, p2 = self._parents
671 pa = p1.ancestor(p2)
671 pa = p1.ancestor(p2)
672 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
672 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
673
673
674 def func(f):
674 def func(f):
675 f = copiesget(f, f) # may be wrong for merges with copies
675 f = copiesget(f, f) # may be wrong for merges with copies
676 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
676 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
677 if fl1 == fl2:
677 if fl1 == fl2:
678 return fl1
678 return fl1
679 if fl1 == fla:
679 if fl1 == fla:
680 return fl2
680 return fl2
681 if fl2 == fla:
681 if fl2 == fla:
682 return fl1
682 return fl1
683 return '' # punt for conflicts
683 return '' # punt for conflicts
684
684
685 return func
685 return func
686
686
687 @propertycache
687 @propertycache
688 def _flagfunc(self):
688 def _flagfunc(self):
689 return self._repo.dirstate.flagfunc(self._buildflagfunc)
689 return self._repo.dirstate.flagfunc(self._buildflagfunc)
690
690
691 @propertycache
691 @propertycache
692 def _manifest(self):
692 def _manifest(self):
693 """generate a manifest corresponding to the working directory"""
693 """generate a manifest corresponding to the working directory"""
694
694
695 if self._unknown is None:
695 if self._unknown is None:
696 self.status(unknown=True)
696 self.status(unknown=True)
697
697
698 man = self._parents[0].manifest().copy()
698 man = self._parents[0].manifest().copy()
699 if len(self._parents) > 1:
699 if len(self._parents) > 1:
700 man2 = self.p2().manifest()
700 man2 = self.p2().manifest()
701 def getman(f):
701 def getman(f):
702 if f in man:
702 if f in man:
703 return man
703 return man
704 return man2
704 return man2
705 else:
705 else:
706 getman = lambda f: man
706 getman = lambda f: man
707
707
708 copied = self._repo.dirstate.copies()
708 copied = self._repo.dirstate.copies()
709 ff = self._flagfunc
709 ff = self._flagfunc
710 modified, added, removed, deleted = self._status
710 modified, added, removed, deleted = self._status
711 unknown = self._unknown
711 unknown = self._unknown
712 for i, l in (("a", added), ("m", modified), ("u", unknown)):
712 for i, l in (("a", added), ("m", modified), ("u", unknown)):
713 for f in l:
713 for f in l:
714 orig = copied.get(f, f)
714 orig = copied.get(f, f)
715 man[f] = getman(orig).get(orig, nullid) + i
715 man[f] = getman(orig).get(orig, nullid) + i
716 try:
716 try:
717 man.set(f, ff(f))
717 man.set(f, ff(f))
718 except OSError:
718 except OSError:
719 pass
719 pass
720
720
721 for f in deleted + removed:
721 for f in deleted + removed:
722 if f in man:
722 if f in man:
723 del man[f]
723 del man[f]
724
724
725 return man
725 return man
726
726
727 def __iter__(self):
727 def __iter__(self):
728 d = self._repo.dirstate
728 d = self._repo.dirstate
729 for f in d:
729 for f in d:
730 if d[f] != 'r':
730 if d[f] != 'r':
731 yield f
731 yield f
732
732
733 @propertycache
733 @propertycache
734 def _status(self):
734 def _status(self):
735 return self._repo.status()[:4]
735 return self._repo.status()[:4]
736
736
737 @propertycache
737 @propertycache
738 def _user(self):
738 def _user(self):
739 return self._repo.ui.username()
739 return self._repo.ui.username()
740
740
741 @propertycache
741 @propertycache
742 def _date(self):
742 def _date(self):
743 return util.makedate()
743 return util.makedate()
744
744
745 @propertycache
745 @propertycache
746 def _parents(self):
746 def _parents(self):
747 p = self._repo.dirstate.parents()
747 p = self._repo.dirstate.parents()
748 if p[1] == nullid:
748 if p[1] == nullid:
749 p = p[:-1]
749 p = p[:-1]
750 self._parents = [changectx(self._repo, x) for x in p]
750 self._parents = [changectx(self._repo, x) for x in p]
751 return self._parents
751 return self._parents
752
752
753 def status(self, ignored=False, clean=False, unknown=False):
753 def status(self, ignored=False, clean=False, unknown=False):
754 """Explicit status query
754 """Explicit status query
755 Unless this method is used to query the working copy status, the
755 Unless this method is used to query the working copy status, the
756 _status property will implicitly read the status using its default
756 _status property will implicitly read the status using its default
757 arguments."""
757 arguments."""
758 stat = self._repo.status(ignored=ignored, clean=clean, unknown=unknown)
758 stat = self._repo.status(ignored=ignored, clean=clean, unknown=unknown)
759 self._unknown = self._ignored = self._clean = None
759 self._unknown = self._ignored = self._clean = None
760 if unknown:
760 if unknown:
761 self._unknown = stat[4]
761 self._unknown = stat[4]
762 if ignored:
762 if ignored:
763 self._ignored = stat[5]
763 self._ignored = stat[5]
764 if clean:
764 if clean:
765 self._clean = stat[6]
765 self._clean = stat[6]
766 self._status = stat[:4]
766 self._status = stat[:4]
767 return stat
767 return stat
768
768
769 def manifest(self):
769 def manifest(self):
770 return self._manifest
770 return self._manifest
771 def user(self):
771 def user(self):
772 return self._user or self._repo.ui.username()
772 return self._user or self._repo.ui.username()
773 def date(self):
773 def date(self):
774 return self._date
774 return self._date
775 def description(self):
775 def description(self):
776 return self._text
776 return self._text
777 def files(self):
777 def files(self):
778 return sorted(self._status[0] + self._status[1] + self._status[2])
778 return sorted(self._status[0] + self._status[1] + self._status[2])
779
779
780 def modified(self):
780 def modified(self):
781 return self._status[0]
781 return self._status[0]
782 def added(self):
782 def added(self):
783 return self._status[1]
783 return self._status[1]
784 def removed(self):
784 def removed(self):
785 return self._status[2]
785 return self._status[2]
786 def deleted(self):
786 def deleted(self):
787 return self._status[3]
787 return self._status[3]
788 def unknown(self):
788 def unknown(self):
789 assert self._unknown is not None # must call status first
789 assert self._unknown is not None # must call status first
790 return self._unknown
790 return self._unknown
791 def ignored(self):
791 def ignored(self):
792 assert self._ignored is not None # must call status first
792 assert self._ignored is not None # must call status first
793 return self._ignored
793 return self._ignored
794 def clean(self):
794 def clean(self):
795 assert self._clean is not None # must call status first
795 assert self._clean is not None # must call status first
796 return self._clean
796 return self._clean
797 def branch(self):
797 def branch(self):
798 return encoding.tolocal(self._extra['branch'])
798 return encoding.tolocal(self._extra['branch'])
799 def extra(self):
799 def extra(self):
800 return self._extra
800 return self._extra
801
801
802 def tags(self):
802 def tags(self):
803 t = []
803 t = []
804 for p in self.parents():
804 for p in self.parents():
805 t.extend(p.tags())
805 t.extend(p.tags())
806 return t
806 return t
807
807
808 def bookmarks(self):
808 def bookmarks(self):
809 b = []
809 b = []
810 for p in self.parents():
810 for p in self.parents():
811 b.extend(p.bookmarks())
811 b.extend(p.bookmarks())
812 return b
812 return b
813
813
814 def phase(self):
814 def phase(self):
815 phase = 1 # default phase to draft
815 phase = phases.draft # default phase to draft
816 for p in self.parents():
816 for p in self.parents():
817 phase = max(phase, p.phase())
817 phase = max(phase, p.phase())
818 return phase
818 return phase
819
819
820 def hidden(self):
820 def hidden(self):
821 return False
821 return False
822
822
823 def children(self):
823 def children(self):
824 return []
824 return []
825
825
826 def flags(self, path):
826 def flags(self, path):
827 if '_manifest' in self.__dict__:
827 if '_manifest' in self.__dict__:
828 try:
828 try:
829 return self._manifest.flags(path)
829 return self._manifest.flags(path)
830 except KeyError:
830 except KeyError:
831 return ''
831 return ''
832
832
833 try:
833 try:
834 return self._flagfunc(path)
834 return self._flagfunc(path)
835 except OSError:
835 except OSError:
836 return ''
836 return ''
837
837
838 def filectx(self, path, filelog=None):
838 def filectx(self, path, filelog=None):
839 """get a file context from the working directory"""
839 """get a file context from the working directory"""
840 return workingfilectx(self._repo, path, workingctx=self,
840 return workingfilectx(self._repo, path, workingctx=self,
841 filelog=filelog)
841 filelog=filelog)
842
842
843 def ancestor(self, c2):
843 def ancestor(self, c2):
844 """return the ancestor context of self and c2"""
844 """return the ancestor context of self and c2"""
845 return self._parents[0].ancestor(c2) # punt on two parents for now
845 return self._parents[0].ancestor(c2) # punt on two parents for now
846
846
847 def walk(self, match):
847 def walk(self, match):
848 return sorted(self._repo.dirstate.walk(match, self.substate.keys(),
848 return sorted(self._repo.dirstate.walk(match, self.substate.keys(),
849 True, False))
849 True, False))
850
850
851 def dirty(self, missing=False):
851 def dirty(self, missing=False):
852 "check whether a working directory is modified"
852 "check whether a working directory is modified"
853 # check subrepos first
853 # check subrepos first
854 for s in self.substate:
854 for s in self.substate:
855 if self.sub(s).dirty():
855 if self.sub(s).dirty():
856 return True
856 return True
857 # check current working dir
857 # check current working dir
858 return (self.p2() or self.branch() != self.p1().branch() or
858 return (self.p2() or self.branch() != self.p1().branch() or
859 self.modified() or self.added() or self.removed() or
859 self.modified() or self.added() or self.removed() or
860 (missing and self.deleted()))
860 (missing and self.deleted()))
861
861
862 def add(self, list, prefix=""):
862 def add(self, list, prefix=""):
863 join = lambda f: os.path.join(prefix, f)
863 join = lambda f: os.path.join(prefix, f)
864 wlock = self._repo.wlock()
864 wlock = self._repo.wlock()
865 ui, ds = self._repo.ui, self._repo.dirstate
865 ui, ds = self._repo.ui, self._repo.dirstate
866 try:
866 try:
867 rejected = []
867 rejected = []
868 for f in list:
868 for f in list:
869 scmutil.checkportable(ui, join(f))
869 scmutil.checkportable(ui, join(f))
870 p = self._repo.wjoin(f)
870 p = self._repo.wjoin(f)
871 try:
871 try:
872 st = os.lstat(p)
872 st = os.lstat(p)
873 except OSError:
873 except OSError:
874 ui.warn(_("%s does not exist!\n") % join(f))
874 ui.warn(_("%s does not exist!\n") % join(f))
875 rejected.append(f)
875 rejected.append(f)
876 continue
876 continue
877 if st.st_size > 10000000:
877 if st.st_size > 10000000:
878 ui.warn(_("%s: up to %d MB of RAM may be required "
878 ui.warn(_("%s: up to %d MB of RAM may be required "
879 "to manage this file\n"
879 "to manage this file\n"
880 "(use 'hg revert %s' to cancel the "
880 "(use 'hg revert %s' to cancel the "
881 "pending addition)\n")
881 "pending addition)\n")
882 % (f, 3 * st.st_size // 1000000, join(f)))
882 % (f, 3 * st.st_size // 1000000, join(f)))
883 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
883 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
884 ui.warn(_("%s not added: only files and symlinks "
884 ui.warn(_("%s not added: only files and symlinks "
885 "supported currently\n") % join(f))
885 "supported currently\n") % join(f))
886 rejected.append(p)
886 rejected.append(p)
887 elif ds[f] in 'amn':
887 elif ds[f] in 'amn':
888 ui.warn(_("%s already tracked!\n") % join(f))
888 ui.warn(_("%s already tracked!\n") % join(f))
889 elif ds[f] == 'r':
889 elif ds[f] == 'r':
890 ds.normallookup(f)
890 ds.normallookup(f)
891 else:
891 else:
892 ds.add(f)
892 ds.add(f)
893 return rejected
893 return rejected
894 finally:
894 finally:
895 wlock.release()
895 wlock.release()
896
896
897 def forget(self, files):
897 def forget(self, files):
898 wlock = self._repo.wlock()
898 wlock = self._repo.wlock()
899 try:
899 try:
900 for f in files:
900 for f in files:
901 if self._repo.dirstate[f] != 'a':
901 if self._repo.dirstate[f] != 'a':
902 self._repo.dirstate.remove(f)
902 self._repo.dirstate.remove(f)
903 elif f not in self._repo.dirstate:
903 elif f not in self._repo.dirstate:
904 self._repo.ui.warn(_("%s not tracked!\n") % f)
904 self._repo.ui.warn(_("%s not tracked!\n") % f)
905 else:
905 else:
906 self._repo.dirstate.drop(f)
906 self._repo.dirstate.drop(f)
907 finally:
907 finally:
908 wlock.release()
908 wlock.release()
909
909
910 def ancestors(self):
910 def ancestors(self):
911 for a in self._repo.changelog.ancestors(
911 for a in self._repo.changelog.ancestors(
912 *[p.rev() for p in self._parents]):
912 *[p.rev() for p in self._parents]):
913 yield changectx(self._repo, a)
913 yield changectx(self._repo, a)
914
914
915 def undelete(self, list):
915 def undelete(self, list):
916 pctxs = self.parents()
916 pctxs = self.parents()
917 wlock = self._repo.wlock()
917 wlock = self._repo.wlock()
918 try:
918 try:
919 for f in list:
919 for f in list:
920 if self._repo.dirstate[f] != 'r':
920 if self._repo.dirstate[f] != 'r':
921 self._repo.ui.warn(_("%s not removed!\n") % f)
921 self._repo.ui.warn(_("%s not removed!\n") % f)
922 else:
922 else:
923 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
923 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
924 t = fctx.data()
924 t = fctx.data()
925 self._repo.wwrite(f, t, fctx.flags())
925 self._repo.wwrite(f, t, fctx.flags())
926 self._repo.dirstate.normal(f)
926 self._repo.dirstate.normal(f)
927 finally:
927 finally:
928 wlock.release()
928 wlock.release()
929
929
930 def copy(self, source, dest):
930 def copy(self, source, dest):
931 p = self._repo.wjoin(dest)
931 p = self._repo.wjoin(dest)
932 if not os.path.lexists(p):
932 if not os.path.lexists(p):
933 self._repo.ui.warn(_("%s does not exist!\n") % dest)
933 self._repo.ui.warn(_("%s does not exist!\n") % dest)
934 elif not (os.path.isfile(p) or os.path.islink(p)):
934 elif not (os.path.isfile(p) or os.path.islink(p)):
935 self._repo.ui.warn(_("copy failed: %s is not a file or a "
935 self._repo.ui.warn(_("copy failed: %s is not a file or a "
936 "symbolic link\n") % dest)
936 "symbolic link\n") % dest)
937 else:
937 else:
938 wlock = self._repo.wlock()
938 wlock = self._repo.wlock()
939 try:
939 try:
940 if self._repo.dirstate[dest] in '?r':
940 if self._repo.dirstate[dest] in '?r':
941 self._repo.dirstate.add(dest)
941 self._repo.dirstate.add(dest)
942 self._repo.dirstate.copy(source, dest)
942 self._repo.dirstate.copy(source, dest)
943 finally:
943 finally:
944 wlock.release()
944 wlock.release()
945
945
946 class workingfilectx(filectx):
946 class workingfilectx(filectx):
947 """A workingfilectx object makes access to data related to a particular
947 """A workingfilectx object makes access to data related to a particular
948 file in the working directory convenient."""
948 file in the working directory convenient."""
949 def __init__(self, repo, path, filelog=None, workingctx=None):
949 def __init__(self, repo, path, filelog=None, workingctx=None):
950 """changeid can be a changeset revision, node, or tag.
950 """changeid can be a changeset revision, node, or tag.
951 fileid can be a file revision or node."""
951 fileid can be a file revision or node."""
952 self._repo = repo
952 self._repo = repo
953 self._path = path
953 self._path = path
954 self._changeid = None
954 self._changeid = None
955 self._filerev = self._filenode = None
955 self._filerev = self._filenode = None
956
956
957 if filelog:
957 if filelog:
958 self._filelog = filelog
958 self._filelog = filelog
959 if workingctx:
959 if workingctx:
960 self._changectx = workingctx
960 self._changectx = workingctx
961
961
962 @propertycache
962 @propertycache
963 def _changectx(self):
963 def _changectx(self):
964 return workingctx(self._repo)
964 return workingctx(self._repo)
965
965
966 def __nonzero__(self):
966 def __nonzero__(self):
967 return True
967 return True
968
968
969 def __str__(self):
969 def __str__(self):
970 return "%s@%s" % (self.path(), self._changectx)
970 return "%s@%s" % (self.path(), self._changectx)
971
971
972 def __repr__(self):
972 def __repr__(self):
973 return "<workingfilectx %s>" % str(self)
973 return "<workingfilectx %s>" % str(self)
974
974
975 def data(self):
975 def data(self):
976 return self._repo.wread(self._path)
976 return self._repo.wread(self._path)
977 def renamed(self):
977 def renamed(self):
978 rp = self._repo.dirstate.copied(self._path)
978 rp = self._repo.dirstate.copied(self._path)
979 if not rp:
979 if not rp:
980 return None
980 return None
981 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
981 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
982
982
983 def parents(self):
983 def parents(self):
984 '''return parent filectxs, following copies if necessary'''
984 '''return parent filectxs, following copies if necessary'''
985 def filenode(ctx, path):
985 def filenode(ctx, path):
986 return ctx._manifest.get(path, nullid)
986 return ctx._manifest.get(path, nullid)
987
987
988 path = self._path
988 path = self._path
989 fl = self._filelog
989 fl = self._filelog
990 pcl = self._changectx._parents
990 pcl = self._changectx._parents
991 renamed = self.renamed()
991 renamed = self.renamed()
992
992
993 if renamed:
993 if renamed:
994 pl = [renamed + (None,)]
994 pl = [renamed + (None,)]
995 else:
995 else:
996 pl = [(path, filenode(pcl[0], path), fl)]
996 pl = [(path, filenode(pcl[0], path), fl)]
997
997
998 for pc in pcl[1:]:
998 for pc in pcl[1:]:
999 pl.append((path, filenode(pc, path), fl))
999 pl.append((path, filenode(pc, path), fl))
1000
1000
1001 return [filectx(self._repo, p, fileid=n, filelog=l)
1001 return [filectx(self._repo, p, fileid=n, filelog=l)
1002 for p, n, l in pl if n != nullid]
1002 for p, n, l in pl if n != nullid]
1003
1003
1004 def children(self):
1004 def children(self):
1005 return []
1005 return []
1006
1006
1007 def size(self):
1007 def size(self):
1008 return os.lstat(self._repo.wjoin(self._path)).st_size
1008 return os.lstat(self._repo.wjoin(self._path)).st_size
1009 def date(self):
1009 def date(self):
1010 t, tz = self._changectx.date()
1010 t, tz = self._changectx.date()
1011 try:
1011 try:
1012 return (int(os.lstat(self._repo.wjoin(self._path)).st_mtime), tz)
1012 return (int(os.lstat(self._repo.wjoin(self._path)).st_mtime), tz)
1013 except OSError, err:
1013 except OSError, err:
1014 if err.errno != errno.ENOENT:
1014 if err.errno != errno.ENOENT:
1015 raise
1015 raise
1016 return (t, tz)
1016 return (t, tz)
1017
1017
1018 def cmp(self, fctx):
1018 def cmp(self, fctx):
1019 """compare with other file context
1019 """compare with other file context
1020
1020
1021 returns True if different than fctx.
1021 returns True if different than fctx.
1022 """
1022 """
1023 # fctx should be a filectx (not a wfctx)
1023 # fctx should be a filectx (not a wfctx)
1024 # invert comparison to reuse the same code path
1024 # invert comparison to reuse the same code path
1025 return fctx.cmp(self)
1025 return fctx.cmp(self)
1026
1026
1027 class memctx(object):
1027 class memctx(object):
1028 """Use memctx to perform in-memory commits via localrepo.commitctx().
1028 """Use memctx to perform in-memory commits via localrepo.commitctx().
1029
1029
1030 Revision information is supplied at initialization time while
1030 Revision information is supplied at initialization time while
1031 related files data and is made available through a callback
1031 related files data and is made available through a callback
1032 mechanism. 'repo' is the current localrepo, 'parents' is a
1032 mechanism. 'repo' is the current localrepo, 'parents' is a
1033 sequence of two parent revisions identifiers (pass None for every
1033 sequence of two parent revisions identifiers (pass None for every
1034 missing parent), 'text' is the commit message and 'files' lists
1034 missing parent), 'text' is the commit message and 'files' lists
1035 names of files touched by the revision (normalized and relative to
1035 names of files touched by the revision (normalized and relative to
1036 repository root).
1036 repository root).
1037
1037
1038 filectxfn(repo, memctx, path) is a callable receiving the
1038 filectxfn(repo, memctx, path) is a callable receiving the
1039 repository, the current memctx object and the normalized path of
1039 repository, the current memctx object and the normalized path of
1040 requested file, relative to repository root. It is fired by the
1040 requested file, relative to repository root. It is fired by the
1041 commit function for every file in 'files', but calls order is
1041 commit function for every file in 'files', but calls order is
1042 undefined. If the file is available in the revision being
1042 undefined. If the file is available in the revision being
1043 committed (updated or added), filectxfn returns a memfilectx
1043 committed (updated or added), filectxfn returns a memfilectx
1044 object. If the file was removed, filectxfn raises an
1044 object. If the file was removed, filectxfn raises an
1045 IOError. Moved files are represented by marking the source file
1045 IOError. Moved files are represented by marking the source file
1046 removed and the new file added with copy information (see
1046 removed and the new file added with copy information (see
1047 memfilectx).
1047 memfilectx).
1048
1048
1049 user receives the committer name and defaults to current
1049 user receives the committer name and defaults to current
1050 repository username, date is the commit date in any format
1050 repository username, date is the commit date in any format
1051 supported by util.parsedate() and defaults to current date, extra
1051 supported by util.parsedate() and defaults to current date, extra
1052 is a dictionary of metadata or is left empty.
1052 is a dictionary of metadata or is left empty.
1053 """
1053 """
1054 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1054 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1055 date=None, extra=None):
1055 date=None, extra=None):
1056 self._repo = repo
1056 self._repo = repo
1057 self._rev = None
1057 self._rev = None
1058 self._node = None
1058 self._node = None
1059 self._text = text
1059 self._text = text
1060 self._date = date and util.parsedate(date) or util.makedate()
1060 self._date = date and util.parsedate(date) or util.makedate()
1061 self._user = user
1061 self._user = user
1062 parents = [(p or nullid) for p in parents]
1062 parents = [(p or nullid) for p in parents]
1063 p1, p2 = parents
1063 p1, p2 = parents
1064 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1064 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1065 files = sorted(set(files))
1065 files = sorted(set(files))
1066 self._status = [files, [], [], [], []]
1066 self._status = [files, [], [], [], []]
1067 self._filectxfn = filectxfn
1067 self._filectxfn = filectxfn
1068
1068
1069 self._extra = extra and extra.copy() or {}
1069 self._extra = extra and extra.copy() or {}
1070 if self._extra.get('branch', '') == '':
1070 if self._extra.get('branch', '') == '':
1071 self._extra['branch'] = 'default'
1071 self._extra['branch'] = 'default'
1072
1072
1073 def __str__(self):
1073 def __str__(self):
1074 return str(self._parents[0]) + "+"
1074 return str(self._parents[0]) + "+"
1075
1075
1076 def __int__(self):
1076 def __int__(self):
1077 return self._rev
1077 return self._rev
1078
1078
1079 def __nonzero__(self):
1079 def __nonzero__(self):
1080 return True
1080 return True
1081
1081
1082 def __getitem__(self, key):
1082 def __getitem__(self, key):
1083 return self.filectx(key)
1083 return self.filectx(key)
1084
1084
1085 def p1(self):
1085 def p1(self):
1086 return self._parents[0]
1086 return self._parents[0]
1087 def p2(self):
1087 def p2(self):
1088 return self._parents[1]
1088 return self._parents[1]
1089
1089
1090 def user(self):
1090 def user(self):
1091 return self._user or self._repo.ui.username()
1091 return self._user or self._repo.ui.username()
1092 def date(self):
1092 def date(self):
1093 return self._date
1093 return self._date
1094 def description(self):
1094 def description(self):
1095 return self._text
1095 return self._text
1096 def files(self):
1096 def files(self):
1097 return self.modified()
1097 return self.modified()
1098 def modified(self):
1098 def modified(self):
1099 return self._status[0]
1099 return self._status[0]
1100 def added(self):
1100 def added(self):
1101 return self._status[1]
1101 return self._status[1]
1102 def removed(self):
1102 def removed(self):
1103 return self._status[2]
1103 return self._status[2]
1104 def deleted(self):
1104 def deleted(self):
1105 return self._status[3]
1105 return self._status[3]
1106 def unknown(self):
1106 def unknown(self):
1107 return self._status[4]
1107 return self._status[4]
1108 def ignored(self):
1108 def ignored(self):
1109 return self._status[5]
1109 return self._status[5]
1110 def clean(self):
1110 def clean(self):
1111 return self._status[6]
1111 return self._status[6]
1112 def branch(self):
1112 def branch(self):
1113 return encoding.tolocal(self._extra['branch'])
1113 return encoding.tolocal(self._extra['branch'])
1114 def extra(self):
1114 def extra(self):
1115 return self._extra
1115 return self._extra
1116 def flags(self, f):
1116 def flags(self, f):
1117 return self[f].flags()
1117 return self[f].flags()
1118
1118
1119 def parents(self):
1119 def parents(self):
1120 """return contexts for each parent changeset"""
1120 """return contexts for each parent changeset"""
1121 return self._parents
1121 return self._parents
1122
1122
1123 def filectx(self, path, filelog=None):
1123 def filectx(self, path, filelog=None):
1124 """get a file context from the working directory"""
1124 """get a file context from the working directory"""
1125 return self._filectxfn(self._repo, self, path)
1125 return self._filectxfn(self._repo, self, path)
1126
1126
1127 def commit(self):
1127 def commit(self):
1128 """commit context to the repo"""
1128 """commit context to the repo"""
1129 return self._repo.commitctx(self)
1129 return self._repo.commitctx(self)
1130
1130
1131 class memfilectx(object):
1131 class memfilectx(object):
1132 """memfilectx represents an in-memory file to commit.
1132 """memfilectx represents an in-memory file to commit.
1133
1133
1134 See memctx for more details.
1134 See memctx for more details.
1135 """
1135 """
1136 def __init__(self, path, data, islink=False, isexec=False, copied=None):
1136 def __init__(self, path, data, islink=False, isexec=False, copied=None):
1137 """
1137 """
1138 path is the normalized file path relative to repository root.
1138 path is the normalized file path relative to repository root.
1139 data is the file content as a string.
1139 data is the file content as a string.
1140 islink is True if the file is a symbolic link.
1140 islink is True if the file is a symbolic link.
1141 isexec is True if the file is executable.
1141 isexec is True if the file is executable.
1142 copied is the source file path if current file was copied in the
1142 copied is the source file path if current file was copied in the
1143 revision being committed, or None."""
1143 revision being committed, or None."""
1144 self._path = path
1144 self._path = path
1145 self._data = data
1145 self._data = data
1146 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
1146 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
1147 self._copied = None
1147 self._copied = None
1148 if copied:
1148 if copied:
1149 self._copied = (copied, nullid)
1149 self._copied = (copied, nullid)
1150
1150
1151 def __nonzero__(self):
1151 def __nonzero__(self):
1152 return True
1152 return True
1153 def __str__(self):
1153 def __str__(self):
1154 return "%s@%s" % (self.path(), self._changectx)
1154 return "%s@%s" % (self.path(), self._changectx)
1155 def path(self):
1155 def path(self):
1156 return self._path
1156 return self._path
1157 def data(self):
1157 def data(self):
1158 return self._data
1158 return self._data
1159 def flags(self):
1159 def flags(self):
1160 return self._flags
1160 return self._flags
1161 def isexec(self):
1161 def isexec(self):
1162 return 'x' in self._flags
1162 return 'x' in self._flags
1163 def islink(self):
1163 def islink(self):
1164 return 'l' in self._flags
1164 return 'l' in self._flags
1165 def renamed(self):
1165 def renamed(self):
1166 return self._copied
1166 return self._copied
@@ -1,2220 +1,2221
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup, subrepo, discovery, pushkey
10 import repo, changegroup, subrepo, discovery, pushkey
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 import lock, transaction, store, encoding
12 import lock, transaction, store, encoding
13 import scmutil, util, extensions, hook, error, revset
13 import scmutil, util, extensions, hook, error, revset
14 import match as matchmod
14 import match as matchmod
15 import merge as mergemod
15 import merge as mergemod
16 import tags as tagsmod
16 import tags as tagsmod
17 from lock import release
17 from lock import release
18 import weakref, errno, os, time, inspect
18 import weakref, errno, os, time, inspect
19 propertycache = util.propertycache
19 propertycache = util.propertycache
20 filecache = scmutil.filecache
20 filecache = scmutil.filecache
21
21
22 class localrepository(repo.repository):
22 class localrepository(repo.repository):
# wire-protocol capabilities advertised by this repository
capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
                    'known', 'getbundle'))
# requirements that change the on-disk revlog format
supportedformats = set(('revlogv1', 'generaldelta'))
# every requirement this version knows how to read
supported = supportedformats | set(('store', 'fncache', 'shared',
                                    'dotencode'))
28
28
29 def __init__(self, baseui, path=None, create=False):
29 def __init__(self, baseui, path=None, create=False):
30 repo.repository.__init__(self)
30 repo.repository.__init__(self)
31 self.root = os.path.realpath(util.expandpath(path))
31 self.root = os.path.realpath(util.expandpath(path))
32 self.path = os.path.join(self.root, ".hg")
32 self.path = os.path.join(self.root, ".hg")
33 self.origroot = path
33 self.origroot = path
34 self.auditor = scmutil.pathauditor(self.root, self._checknested)
34 self.auditor = scmutil.pathauditor(self.root, self._checknested)
35 self.opener = scmutil.opener(self.path)
35 self.opener = scmutil.opener(self.path)
36 self.wopener = scmutil.opener(self.root)
36 self.wopener = scmutil.opener(self.root)
37 self.baseui = baseui
37 self.baseui = baseui
38 self.ui = baseui.copy()
38 self.ui = baseui.copy()
39 self._dirtyphases = False
39 self._dirtyphases = False
40
40
41 try:
41 try:
42 self.ui.readconfig(self.join("hgrc"), self.root)
42 self.ui.readconfig(self.join("hgrc"), self.root)
43 extensions.loadall(self.ui)
43 extensions.loadall(self.ui)
44 except IOError:
44 except IOError:
45 pass
45 pass
46
46
47 if not os.path.isdir(self.path):
47 if not os.path.isdir(self.path):
48 if create:
48 if create:
49 if not os.path.exists(path):
49 if not os.path.exists(path):
50 util.makedirs(path)
50 util.makedirs(path)
51 util.makedir(self.path, notindexed=True)
51 util.makedir(self.path, notindexed=True)
52 requirements = ["revlogv1"]
52 requirements = ["revlogv1"]
53 if self.ui.configbool('format', 'usestore', True):
53 if self.ui.configbool('format', 'usestore', True):
54 os.mkdir(os.path.join(self.path, "store"))
54 os.mkdir(os.path.join(self.path, "store"))
55 requirements.append("store")
55 requirements.append("store")
56 if self.ui.configbool('format', 'usefncache', True):
56 if self.ui.configbool('format', 'usefncache', True):
57 requirements.append("fncache")
57 requirements.append("fncache")
58 if self.ui.configbool('format', 'dotencode', True):
58 if self.ui.configbool('format', 'dotencode', True):
59 requirements.append('dotencode')
59 requirements.append('dotencode')
60 # create an invalid changelog
60 # create an invalid changelog
61 self.opener.append(
61 self.opener.append(
62 "00changelog.i",
62 "00changelog.i",
63 '\0\0\0\2' # represents revlogv2
63 '\0\0\0\2' # represents revlogv2
64 ' dummy changelog to prevent using the old repo layout'
64 ' dummy changelog to prevent using the old repo layout'
65 )
65 )
66 if self.ui.configbool('format', 'generaldelta', False):
66 if self.ui.configbool('format', 'generaldelta', False):
67 requirements.append("generaldelta")
67 requirements.append("generaldelta")
68 requirements = set(requirements)
68 requirements = set(requirements)
69 else:
69 else:
70 raise error.RepoError(_("repository %s not found") % path)
70 raise error.RepoError(_("repository %s not found") % path)
71 elif create:
71 elif create:
72 raise error.RepoError(_("repository %s already exists") % path)
72 raise error.RepoError(_("repository %s already exists") % path)
73 else:
73 else:
74 try:
74 try:
75 requirements = scmutil.readrequires(self.opener, self.supported)
75 requirements = scmutil.readrequires(self.opener, self.supported)
76 except IOError, inst:
76 except IOError, inst:
77 if inst.errno != errno.ENOENT:
77 if inst.errno != errno.ENOENT:
78 raise
78 raise
79 requirements = set()
79 requirements = set()
80
80
81 self.sharedpath = self.path
81 self.sharedpath = self.path
82 try:
82 try:
83 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
83 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
84 if not os.path.exists(s):
84 if not os.path.exists(s):
85 raise error.RepoError(
85 raise error.RepoError(
86 _('.hg/sharedpath points to nonexistent directory %s') % s)
86 _('.hg/sharedpath points to nonexistent directory %s') % s)
87 self.sharedpath = s
87 self.sharedpath = s
88 except IOError, inst:
88 except IOError, inst:
89 if inst.errno != errno.ENOENT:
89 if inst.errno != errno.ENOENT:
90 raise
90 raise
91
91
92 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
92 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
93 self.spath = self.store.path
93 self.spath = self.store.path
94 self.sopener = self.store.opener
94 self.sopener = self.store.opener
95 self.sjoin = self.store.join
95 self.sjoin = self.store.join
96 self.opener.createmode = self.store.createmode
96 self.opener.createmode = self.store.createmode
97 self._applyrequirements(requirements)
97 self._applyrequirements(requirements)
98 if create:
98 if create:
99 self._writerequirements()
99 self._writerequirements()
100
100
101
101
102 self._branchcache = None
102 self._branchcache = None
103 self._branchcachetip = None
103 self._branchcachetip = None
104 self.filterpats = {}
104 self.filterpats = {}
105 self._datafilters = {}
105 self._datafilters = {}
106 self._transref = self._lockref = self._wlockref = None
106 self._transref = self._lockref = self._wlockref = None
107
107
108 # A cache for various files under .hg/ that tracks file changes,
108 # A cache for various files under .hg/ that tracks file changes,
109 # (used by the filecache decorator)
109 # (used by the filecache decorator)
110 #
110 #
111 # Maps a property name to its util.filecacheentry
111 # Maps a property name to its util.filecacheentry
112 self._filecache = {}
112 self._filecache = {}
113
113
def _applyrequirements(self, requirements):
    """Record repository requirements and derive the revlog opener options."""
    self.requirements = requirements
    # only these requirements influence how revlogs are opened
    openerreqs = set(('revlogv1', 'generaldelta'))
    opts = {}
    for req in requirements:
        if req in openerreqs:
            opts[req] = 1
    self.sopener.options = opts
119
119
def _writerequirements(self):
    """Write self.requirements out to .hg/requires, one per line."""
    reqfile = self.opener("requires", "w")
    for req in self.requirements:
        reqfile.write("%s\n" % req)
    reqfile.close()
125
125
def _checknested(self, path):
    """Determine if path is a legal nested repository."""
    if not path.startswith(self.root):
        return False
    subpath = path[len(self.root) + 1:]
    normsubpath = util.pconvert(subpath)

    # XXX: Checking against the current working copy is wrong in
    # the sense that it can reject things like
    #
    #   $ hg cat -r 10 sub/x.txt
    #
    # if sub/ is no longer a subrepository in the working copy
    # parent revision.
    #
    # However, it can of course also allow things that would have
    # been rejected before, such as the above cat command if sub/
    # is a subrepository now, but was a normal directory before.
    # The old path auditor would have rejected by mistake since it
    # panics when it sees sub/.hg/.
    #
    # All in all, checking against the working copy seems sensible
    # since we want to prevent access to nested repositories on
    # the filesystem *now*.
    ctx = self[None]
    parts = util.splitpath(subpath)
    while parts:
        prefix = '/'.join(parts)
        if prefix in ctx.substate:
            if prefix == normsubpath:
                # the path is itself a registered subrepository
                return True
            else:
                # delegate the check to the enclosing subrepository
                sub = ctx.sub(prefix)
                return sub.checknested(subpath[len(prefix) + 1:])
        else:
            # not a subrepo at this depth; retry with the parent dir
            parts.pop()
    return False
163
163
@filecache('bookmarks')
def _bookmarks(self):
    # bookmark name -> node mapping, read from .hg/bookmarks
    return bookmarks.read(self)
167
167
@filecache('bookmarks.current')
def _bookmarkcurrent(self):
    # name of the currently-active bookmark, read from .hg/bookmarks.current
    return bookmarks.readcurrent(self)
171
171
def _writebookmarks(self, marks):
    # persist the in-memory bookmark table; the marks argument is
    # unused here but kept for interface compatibility
    bookmarks.write(self)
174
174
@filecache('phaseroots')
def _phaseroots(self):
    """Phase root nodes read from .hg/store/phaseroots (cached)."""
    # re-reading from disk discards any pending in-memory changes
    self._dirtyphases = False
    phaseroots = phases.readroots(self)
    # drop roots referring to changesets unknown to this repository
    phases.filterunknown(self, phaseroots)
    return phaseroots
181
181
@propertycache
def _phaserev(self):
    """Per-revision phase table (cached list indexed by rev number).

    Every revision defaults to phases.public; a revision gets a
    tracked non-public phase when it is one of that phase's roots or
    a descendant of one.
    """
    # use the named constant rather than the raw integer 0 so the
    # default stays correct if phase numbering ever changes
    cache = [phases.public] * len(self)
    for phase in phases.trackedphases:
        roots = map(self.changelog.rev, self._phaseroots[phase])
        if roots:
            # mark the roots themselves...
            for rev in roots:
                cache[rev] = phase
            # ...and everything that descends from them
            for rev in self.changelog.descendants(*roots):
                cache[rev] = phase
    return cache
193
193
@filecache('00changelog.i', True)
def changelog(self):
    """The changelog, including pending (in-transaction) revisions
    when HG_PENDING points inside this repository (e.g. while a
    pretxnchangegroup hook runs)."""
    cl = changelog.changelog(self.sopener)
    if 'HG_PENDING' in os.environ:
        pending = os.environ['HG_PENDING']
        if pending.startswith(self.root):
            cl.readpending('00changelog.i.a')
    return cl
202
202
@filecache('00manifest.i', True)
def manifest(self):
    # the manifest revlog, opened on the store opener
    return manifest.manifest(self.sopener)
206
206
@filecache('dirstate')
def dirstate(self):
    """The working-directory state, with working-parent validation."""
    # mutable cell so validate() can flip it (no nonlocal in py2)
    warned = [0]
    def validate(node):
        # map an unknown working parent to nullid, warning only once
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not warned[0]:
                warned[0] = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    return dirstate.dirstate(self.opener, self.ui, self.root, validate)
222
222
def __getitem__(self, changeid):
    """Return a context for changeid; None selects the working directory."""
    if changeid is None:
        return context.workingctx(self)
    return context.changectx(self, changeid)
227
227
def __contains__(self, changeid):
    """True if changeid resolves to a changeset in this repository."""
    try:
        node = self.lookup(changeid)
    except error.RepoLookupError:
        return False
    return bool(node)
233
233
def __nonzero__(self):
    # a repository object is always truthy, even when empty
    return True
236
236
def __len__(self):
    # number of revisions, delegated to the changelog
    return len(self.changelog)
239
239
def __iter__(self):
    # yield revision numbers in ascending order
    for rev in xrange(len(self)):
        yield rev
243
243
def revs(self, expr, *args):
    '''Return a list of revisions matching the given revset'''
    spec = revset.formatspec(expr, *args)
    matcher = revset.match(None, spec)
    return list(matcher(self, range(len(self))))
249
249
def set(self, expr, *args):
    '''
    Yield a context for each matching revision, after doing arg
    replacement via revset.formatspec
    '''
    for rev in self.revs(expr, *args):
        yield self[rev]
257
257
def url(self):
    """Return the file: URL of this repository."""
    return 'file:' + self.root
260
260
def hook(self, name, throw=False, **args):
    """Run the named hook; when throw is True a failing hook raises."""
    return hook.hook(self.ui, self, name, throw, **args)
263
263
# characters that may never appear in a tag name
tag_disallowed = ':\r\n'
265
265
266 def _tag(self, names, node, message, local, user, date, extra={}):
266 def _tag(self, names, node, message, local, user, date, extra={}):
267 if isinstance(names, str):
267 if isinstance(names, str):
268 allchars = names
268 allchars = names
269 names = (names,)
269 names = (names,)
270 else:
270 else:
271 allchars = ''.join(names)
271 allchars = ''.join(names)
272 for c in self.tag_disallowed:
272 for c in self.tag_disallowed:
273 if c in allchars:
273 if c in allchars:
274 raise util.Abort(_('%r cannot be used in a tag name') % c)
274 raise util.Abort(_('%r cannot be used in a tag name') % c)
275
275
276 branches = self.branchmap()
276 branches = self.branchmap()
277 for name in names:
277 for name in names:
278 self.hook('pretag', throw=True, node=hex(node), tag=name,
278 self.hook('pretag', throw=True, node=hex(node), tag=name,
279 local=local)
279 local=local)
280 if name in branches:
280 if name in branches:
281 self.ui.warn(_("warning: tag %s conflicts with existing"
281 self.ui.warn(_("warning: tag %s conflicts with existing"
282 " branch name\n") % name)
282 " branch name\n") % name)
283
283
284 def writetags(fp, names, munge, prevtags):
284 def writetags(fp, names, munge, prevtags):
285 fp.seek(0, 2)
285 fp.seek(0, 2)
286 if prevtags and prevtags[-1] != '\n':
286 if prevtags and prevtags[-1] != '\n':
287 fp.write('\n')
287 fp.write('\n')
288 for name in names:
288 for name in names:
289 m = munge and munge(name) or name
289 m = munge and munge(name) or name
290 if self._tagscache.tagtypes and name in self._tagscache.tagtypes:
290 if self._tagscache.tagtypes and name in self._tagscache.tagtypes:
291 old = self.tags().get(name, nullid)
291 old = self.tags().get(name, nullid)
292 fp.write('%s %s\n' % (hex(old), m))
292 fp.write('%s %s\n' % (hex(old), m))
293 fp.write('%s %s\n' % (hex(node), m))
293 fp.write('%s %s\n' % (hex(node), m))
294 fp.close()
294 fp.close()
295
295
296 prevtags = ''
296 prevtags = ''
297 if local:
297 if local:
298 try:
298 try:
299 fp = self.opener('localtags', 'r+')
299 fp = self.opener('localtags', 'r+')
300 except IOError:
300 except IOError:
301 fp = self.opener('localtags', 'a')
301 fp = self.opener('localtags', 'a')
302 else:
302 else:
303 prevtags = fp.read()
303 prevtags = fp.read()
304
304
305 # local tags are stored in the current charset
305 # local tags are stored in the current charset
306 writetags(fp, names, None, prevtags)
306 writetags(fp, names, None, prevtags)
307 for name in names:
307 for name in names:
308 self.hook('tag', node=hex(node), tag=name, local=local)
308 self.hook('tag', node=hex(node), tag=name, local=local)
309 return
309 return
310
310
311 try:
311 try:
312 fp = self.wfile('.hgtags', 'rb+')
312 fp = self.wfile('.hgtags', 'rb+')
313 except IOError, e:
313 except IOError, e:
314 if e.errno != errno.ENOENT:
314 if e.errno != errno.ENOENT:
315 raise
315 raise
316 fp = self.wfile('.hgtags', 'ab')
316 fp = self.wfile('.hgtags', 'ab')
317 else:
317 else:
318 prevtags = fp.read()
318 prevtags = fp.read()
319
319
320 # committed tags are stored in UTF-8
320 # committed tags are stored in UTF-8
321 writetags(fp, names, encoding.fromlocal, prevtags)
321 writetags(fp, names, encoding.fromlocal, prevtags)
322
322
323 fp.close()
323 fp.close()
324
324
325 if '.hgtags' not in self.dirstate:
325 if '.hgtags' not in self.dirstate:
326 self[None].add(['.hgtags'])
326 self[None].add(['.hgtags'])
327
327
328 m = matchmod.exact(self.root, '', ['.hgtags'])
328 m = matchmod.exact(self.root, '', ['.hgtags'])
329 tagnode = self.commit(message, user, date, extra=extra, match=m)
329 tagnode = self.commit(message, user, date, extra=extra, match=m)
330
330
331 for name in names:
331 for name in names:
332 self.hook('tag', node=hex(node), tag=name, local=local)
332 self.hook('tag', node=hex(node), tag=name, local=local)
333
333
334 return tagnode
334 return tagnode
335
335
def tag(self, names, node, message, local, user, date):
    '''tag a revision with one or more symbolic names.

    names is a list of strings or, when adding a single tag, names may be a
    string.

    if local is True, the tags are stored in a per-repository file.
    otherwise, they are stored in the .hgtags file, and a new
    changeset is committed with the change.

    keyword arguments:

    local: whether to store tags in non-version-controlled file
    (default False)

    message: commit message to use if committing

    user: name of user to use if committing

    date: date tuple to use if committing'''

    if not local:
        # refuse to commit a tag while .hgtags itself has pending
        # modifications; the tag commit would swallow them
        for changes in self.status()[:5]:
            if '.hgtags' in changes:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

    self.tags() # instantiate the cache
    self._tag(names, node, message, local, user, date)
365
365
@propertycache
def _tagscache(self):
    '''Returns a tagscache object that contains various tags related
    caches.'''

    # This simplifies its cache management by having one decorated
    # function (this one) and the rest simply fetch things from it.
    class tagscache(object):
        def __init__(self):
            # tags maps tag name to node; tagtypes maps tag name to
            # 'global' or 'local'.  (Global tags are defined by
            # .hgtags across all heads, and local tags are defined in
            # .hg/localtags.)  They constitute the in-memory cache of
            # tags.
            self.tags = self.tagtypes = None
            # derived caches, filled lazily by tagslist()/nodetags()
            self.nodetagscache = self.tagslist = None

    cache = tagscache()
    cache.tags, cache.tagtypes = self._findtags()
    return cache
387
387
def tags(self):
    '''return a mapping of tag to node'''
    return self._tagscache.tags
391
391
def _findtags(self):
    '''Do the hard work of finding tags.  Return a pair of dicts
    (tags, tagtypes) where tags maps tag name to node, and tagtypes
    maps tag name to a string like \'global\' or \'local\'.
    Subclasses or extensions are free to add their own tags, but
    should be aware that the returned dicts will be retained for the
    duration of the localrepo object.'''

    # XXX what tagtype should subclasses/extensions use?  Currently
    # mq and bookmarks add tags, but do not set the tagtype at all.
    # Should each extension invent its own tag type?  Should there
    # be one tagtype for all such "virtual" tags?  Or is the status
    # quo fine?

    alltags = {}    # map tag name to (node, hist)
    tagtypes = {}

    tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
    tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

    # Build the return dicts.  Have to re-encode tag names because
    # the tags module always uses UTF-8 (in order not to lose info
    # writing to the cache), but the rest of Mercurial wants them in
    # local encoding.
    tags = {}
    for (name, (node, hist)) in alltags.iteritems():
        if node != nullid:
            try:
                # ignore tags to unknown nodes
                self.changelog.lookup(node)
                tags[encoding.tolocal(name)] = node
            except error.LookupError:
                pass
    # 'tip' is implicit and always present
    tags['tip'] = self.changelog.tip()
    tagtypes = dict([(encoding.tolocal(name), value)
                     for (name, value) in tagtypes.iteritems()])
    return (tags, tagtypes)
429
429
def tagtype(self, tagname):
    '''
    return the type of the given tag. result can be:

    'local'  : a local tag
    'global' : a global tag
    None     : tag does not exist
    '''
    return self._tagscache.tagtypes.get(tagname)
440
440
def tagslist(self):
    '''return a list of tags ordered by revision'''
    if not self._tagscache.tagslist:
        entries = []
        for name, node in self.tags().items():
            entries.append((self.changelog.rev(node), name, node))
        entries.sort()
        self._tagscache.tagslist = [(name, node)
                                    for rev, name, node in entries]
    return self._tagscache.tagslist
451
451
def nodetags(self, node):
    '''return the tags associated with a node'''
    if not self._tagscache.nodetagscache:
        # invert the tag->node mapping, sorting each node's tag list
        bynode = {}
        for tagname, n in self.tags().items():
            bynode.setdefault(n, []).append(tagname)
        for taglist in bynode.values():
            taglist.sort()
        self._tagscache.nodetagscache = bynode
    return self._tagscache.nodetagscache.get(node, [])
462
462
def nodebookmarks(self, node):
    """Return the sorted list of bookmarks pointing at node."""
    marks = []
    for bookmark, n in self._bookmarks.items():
        if n == node:
            marks.append(bookmark)
    return sorted(marks)
469
469
def _branchtags(self, partial, lrev):
    # TODO: rename this function?
    # extend the branch cache in 'partial' from lrev up to the current
    # tip, then persist it
    tiprev = len(self) - 1
    if lrev != tiprev:
        ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
        self._updatebranchcache(partial, ctxgen)
        self._writebranchcache(partial, self.changelog.tip(), tiprev)
    return partial
479
479
def updatebranchcache(self):
    """Bring the branch-heads cache up to date with the changelog tip."""
    tip = self.changelog.tip()
    if self._branchcache is not None and self._branchcachetip == tip:
        # already current
        return self._branchcache

    oldtip = self._branchcachetip
    self._branchcachetip = tip
    if oldtip is None or oldtip not in self.changelog.nodemap:
        # never built, or built against a now-stripped tip: restart
        # from the on-disk cache
        partial, last, lrev = self._readbranchcache()
    else:
        # extend the existing in-memory cache incrementally
        lrev = self.changelog.rev(oldtip)
        partial = self._branchcache

    self._branchtags(partial, lrev)
    # this private cache holds all heads (not just tips)
    self._branchcache = partial
496
496
def branchmap(self):
    '''returns a dictionary {branch: [branchheads]}'''
    self.updatebranchcache()
    return self._branchcache
501
501
def branchtags(self):
    '''return a dict where branch names map to the tipmost head of
    the branch, open heads come before closed'''
    bt = {}
    for branch, heads in self.branchmap().items():
        # default to the newest head, but prefer the newest *open* one
        tip = heads[-1]
        for head in reversed(heads):
            if 'close' not in self.changelog.read(head)[5]:
                tip = head
                break
        bt[branch] = tip
    return bt
514
514
    def _readbranchcache(self):
        """Read the on-disk branch head cache.

        Returns a (partial, last, lrev) tuple: partial maps branch name to
        a list of head nodes, and last/lrev identify the tip node and
        revision the cache was valid for.  Any problem (missing file,
        corrupt content, stale tip) yields an empty cache with
        nullid/nullrev so callers simply rebuild from scratch.
        """
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            # no readable cache file: start over
            return {}, nullid, nullrev

        try:
            # first line: "<tip hex> <tip rev>"
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            # remaining lines: "<head node hex> <branch name>"
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            # a corrupt cache is not fatal: report it in debug mode only
            # and fall back to an empty cache
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
543
543
544 def _writebranchcache(self, branches, tip, tiprev):
544 def _writebranchcache(self, branches, tip, tiprev):
545 try:
545 try:
546 f = self.opener("cache/branchheads", "w", atomictemp=True)
546 f = self.opener("cache/branchheads", "w", atomictemp=True)
547 f.write("%s %s\n" % (hex(tip), tiprev))
547 f.write("%s %s\n" % (hex(tip), tiprev))
548 for label, nodes in branches.iteritems():
548 for label, nodes in branches.iteritems():
549 for node in nodes:
549 for node in nodes:
550 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
550 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
551 f.close()
551 f.close()
552 except (IOError, OSError):
552 except (IOError, OSError):
553 pass
553 pass
554
554
    def _updatebranchcache(self, partial, ctxgen):
        """Update the branch head cache *partial* in place with the new
        changesets yielded by *ctxgen* (expected in ascending revision
        order)."""
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) <= 1:
                # a single head cannot shadow anything
                continue
            bheads = sorted(bheads, key=lambda x: self[x].rev())
            # starting from tip means fewer passes over reachable
            while newnodes:
                latest = newnodes.pop()
                if latest not in bheads:
                    continue
                minbhrev = self[bheads[0]].node()
                reachable = self.changelog.reachable(latest, minbhrev)
                reachable.remove(latest)
                if reachable:
                    # any head reachable from 'latest' is an ancestor,
                    # hence not a real head: drop it
                    bheads = [b for b in bheads if b not in reachable]
            partial[branch] = bheads
580
580
    def lookup(self, key):
        """Resolve *key* to a binary changelog node.

        Resolution order: integer revision, '.', 'null', 'tip', full
        node/hex, bookmark, tag, branch, then unambiguous node prefix.
        Raises error.Abort when the key is a dirstate parent missing from
        the changelog, or error.RepoLookupError when nothing matches.
        """
        if isinstance(key, int):
            return self.changelog.node(key)
        elif key == '.':
            return self.dirstate.p1()
        elif key == 'null':
            return nullid
        elif key == 'tip':
            return self.changelog.tip()
        n = self.changelog._match(key)
        if n:
            return n
        if key in self._bookmarks:
            return self._bookmarks[key]
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n

        # can't find key, check if it might have come from damaged dirstate
        if key in self.dirstate.parents():
            raise error.Abort(_("working directory has unknown parent '%s'!")
                              % short(key))
        try:
            if len(key) == 20:
                # looks like a binary node: report it in readable hex form
                key = hex(key)
        except TypeError:
            # key may not support len() at all (e.g. an odd object)
            pass
        raise error.RepoLookupError(_("unknown revision '%s'") % key)
613
613
614 def lookupbranch(self, key, remote=None):
614 def lookupbranch(self, key, remote=None):
615 repo = remote or self
615 repo = remote or self
616 if key in repo.branchmap():
616 if key in repo.branchmap():
617 return key
617 return key
618
618
619 repo = (remote and remote.local()) and remote or self
619 repo = (remote and remote.local()) and remote or self
620 return repo[key].branch()
620 return repo[key].branch()
621
621
622 def known(self, nodes):
622 def known(self, nodes):
623 nm = self.changelog.nodemap
623 nm = self.changelog.nodemap
624 return [(n in nm) for n in nodes]
624 return [(n in nm) for n in nodes]
625
625
626 def local(self):
626 def local(self):
627 return self
627 return self
628
628
629 def join(self, f):
629 def join(self, f):
630 return os.path.join(self.path, f)
630 return os.path.join(self.path, f)
631
631
632 def wjoin(self, f):
632 def wjoin(self, f):
633 return os.path.join(self.root, f)
633 return os.path.join(self.root, f)
634
634
635 def file(self, f):
635 def file(self, f):
636 if f[0] == '/':
636 if f[0] == '/':
637 f = f[1:]
637 f = f[1:]
638 return filelog.filelog(self.sopener, f)
638 return filelog.filelog(self.sopener, f)
639
639
640 def changectx(self, changeid):
640 def changectx(self, changeid):
641 return self[changeid]
641 return self[changeid]
642
642
643 def parents(self, changeid=None):
643 def parents(self, changeid=None):
644 '''get list of changectxs for parents of changeid'''
644 '''get list of changectxs for parents of changeid'''
645 return self[changeid].parents()
645 return self[changeid].parents()
646
646
647 def filectx(self, path, changeid=None, fileid=None):
647 def filectx(self, path, changeid=None, fileid=None):
648 """changeid can be a changeset revision, node, or tag.
648 """changeid can be a changeset revision, node, or tag.
649 fileid can be a file revision or node."""
649 fileid can be a file revision or node."""
650 return context.filectx(self, path, changeid, fileid)
650 return context.filectx(self, path, changeid, fileid)
651
651
652 def getcwd(self):
652 def getcwd(self):
653 return self.dirstate.getcwd()
653 return self.dirstate.getcwd()
654
654
655 def pathto(self, f, cwd=None):
655 def pathto(self, f, cwd=None):
656 return self.dirstate.pathto(f, cwd)
656 return self.dirstate.pathto(f, cwd)
657
657
658 def wfile(self, f, mode='r'):
658 def wfile(self, f, mode='r'):
659 return self.wopener(f, mode)
659 return self.wopener(f, mode)
660
660
661 def _link(self, f):
661 def _link(self, f):
662 return os.path.islink(self.wjoin(f))
662 return os.path.islink(self.wjoin(f))
663
663
    def _loadfilter(self, filter):
        """Parse the config section named *filter* (e.g. 'encode' or
        'decode') into a list of (matcher, filterfn, params) triples,
        caching the result in self.filterpats."""
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    # '!' explicitly disables filtering for this pattern
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                # a command starting with a registered data-filter name is
                # dispatched to that Python filter instead of a shell pipe
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    # default: run the data through the external command
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]
687
687
688 def _filter(self, filterpats, filename, data):
688 def _filter(self, filterpats, filename, data):
689 for mf, fn, cmd in filterpats:
689 for mf, fn, cmd in filterpats:
690 if mf(filename):
690 if mf(filename):
691 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
691 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
692 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
692 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
693 break
693 break
694
694
695 return data
695 return data
696
696
    @propertycache
    def _encodefilterpats(self):
        # lazily-computed (matcher, filterfn, params) triples built from
        # the [encode] config section
        return self._loadfilter('encode')
700
700
    @propertycache
    def _decodefilterpats(self):
        # lazily-computed (matcher, filterfn, params) triples built from
        # the [decode] config section
        return self._loadfilter('decode')
704
704
705 def adddatafilter(self, name, filter):
705 def adddatafilter(self, name, filter):
706 self._datafilters[name] = filter
706 self._datafilters[name] = filter
707
707
708 def wread(self, filename):
708 def wread(self, filename):
709 if self._link(filename):
709 if self._link(filename):
710 data = os.readlink(self.wjoin(filename))
710 data = os.readlink(self.wjoin(filename))
711 else:
711 else:
712 data = self.wopener.read(filename)
712 data = self.wopener.read(filename)
713 return self._filter(self._encodefilterpats, filename, data)
713 return self._filter(self._encodefilterpats, filename, data)
714
714
715 def wwrite(self, filename, data, flags):
715 def wwrite(self, filename, data, flags):
716 data = self._filter(self._decodefilterpats, filename, data)
716 data = self._filter(self._decodefilterpats, filename, data)
717 if 'l' in flags:
717 if 'l' in flags:
718 self.wopener.symlink(data, filename)
718 self.wopener.symlink(data, filename)
719 else:
719 else:
720 self.wopener.write(filename, data)
720 self.wopener.write(filename, data)
721 if 'x' in flags:
721 if 'x' in flags:
722 util.setflags(self.wjoin(filename), False, True)
722 util.setflags(self.wjoin(filename), False, True)
723
723
724 def wwritedata(self, filename, data):
724 def wwritedata(self, filename, data):
725 return self._filter(self._decodefilterpats, filename, data)
725 return self._filter(self._decodefilterpats, filename, data)
726
726
    def transaction(self, desc):
        """Open (or nest into) a store transaction described by *desc*.

        Returns a transaction object.  Journal files written by
        _writejournal are renamed to their undo.* counterparts when the
        transaction closes, enabling a later rollback.
        Raises error.RepoError when an abandoned journal is found.
        """
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            # a transaction is already active: join it instead
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        journalfiles = self._writejournal(desc)
        renames = [(x, undoname(x)) for x in journalfiles]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        # only keep a weak reference so an abandoned transaction object
        # can still be garbage collected
        self._transref = weakref.ref(tr)
        return tr
746
746
    def _writejournal(self, desc):
        """Snapshot rollback state (dirstate, branch, description,
        bookmarks, phaseroots) into journal.* files.

        Returns the tuple of journal paths a transaction must rename to
        undo.* when it closes."""
        # save dirstate for rollback
        try:
            ds = self.opener.read("dirstate")
        except IOError:
            # no dirstate yet (fresh repo): journal an empty one
            ds = ""
        self.opener.write("journal.dirstate", ds)
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))

        bkname = self.join('bookmarks')
        if os.path.exists(bkname):
            util.copyfile(bkname, self.join('journal.bookmarks'))
        else:
            self.opener.write('journal.bookmarks', '')
        # phase roots live in the store (sjoin), unlike the files above
        phasesname = self.sjoin('phaseroots')
        if os.path.exists(phasesname):
            util.copyfile(phasesname, self.sjoin('journal.phaseroots'))
        else:
            self.sopener.write('journal.phaseroots', '')

        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
                self.join('journal.bookmarks'),
                self.sjoin('journal.phaseroots'))
774
774
    def recover(self):
        """Replay the journal of an interrupted transaction, if any.

        Returns True when a journal was found and rolled back, False
        otherwise."""
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                # on-disk state changed behind our caches: drop them
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()
789
789
790 def rollback(self, dryrun=False, force=False):
790 def rollback(self, dryrun=False, force=False):
791 wlock = lock = None
791 wlock = lock = None
792 try:
792 try:
793 wlock = self.wlock()
793 wlock = self.wlock()
794 lock = self.lock()
794 lock = self.lock()
795 if os.path.exists(self.sjoin("undo")):
795 if os.path.exists(self.sjoin("undo")):
796 return self._rollback(dryrun, force)
796 return self._rollback(dryrun, force)
797 else:
797 else:
798 self.ui.warn(_("no rollback information available\n"))
798 self.ui.warn(_("no rollback information available\n"))
799 return 1
799 return 1
800 finally:
800 finally:
801 release(lock, wlock)
801 release(lock, wlock)
802
802
    def _rollback(self, dryrun, force):
        """Restore the repository to the state recorded in the undo files.

        Returns 0 on success.  Aborts when rolling back the last commit
        while not checked out on it could lose data, unless *force* is
        set.  With *dryrun*, only report what would happen."""
        ui = self.ui
        try:
            # undo.desc: "<old repo length>\n<operation>\n[<detail>]"
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            # old-style undo data without a description file
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if os.path.exists(self.join('undo.bookmarks')):
            util.rename(self.join('undo.bookmarks'),
                        self.join('bookmarks'))
        if os.path.exists(self.sjoin('undo.phaseroots')):
            util.rename(self.sjoin('undo.phaseroots'),
                        self.sjoin('phaseroots'))
        self.invalidate()

        # did the rollback strip the changesets the working directory
        # was based on?
        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # restore the pre-transaction dirstate and branch as well
            util.rename(self.join('undo.dirstate'), self.join('dirstate'))
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(branch)
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        self.destroyed()
        return 0
865
865
    def invalidatecaches(self):
        """Drop the in-memory tag and branch caches so they are rebuilt
        on next access."""
        try:
            # _tagscache is a lazily-computed attribute; discard the
            # cached value if one exists
            delattr(self, '_tagscache')
        except AttributeError:
            pass

        self._branchcache = None # in UTF-8
        self._branchcachetip = None
874
874
    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly read the dirstate again (i.e. restoring it to a
        previous known good state).'''
        try:
            # dirstate is a cached (file-backed) property: dropping the
            # attribute forces a staleness check on next access
            delattr(self, 'dirstate')
        except AttributeError:
            pass
888
888
    def invalidate(self):
        """Drop all cached file-backed attributes (except dirstate, which
        has its own invalidation path) plus derived caches so they are
        reloaded from disk on next access."""
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(self, k)
            except AttributeError:
                # attribute never computed: nothing to drop
                pass
        self.invalidatecaches()
900
900
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        """Acquire the lock file *lockname*.

        With *wait* false, error.LockHeld propagates immediately; with
        *wait* true we warn and retry with the ui-configured timeout
        (default 600s).  *releasefn* runs on release, *acquirefn* right
        after acquisition."""
        try:
            # first attempt: non-blocking (timeout 0)
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
915
915
916 def _afterlock(self, callback):
916 def _afterlock(self, callback):
917 """add a callback to the current repository lock.
917 """add a callback to the current repository lock.
918
918
919 The callback will be executed on lock release."""
919 The callback will be executed on lock release."""
920 l = self._lockref and self._lockref()
920 l = self._lockref and self._lockref()
921 if l:
921 if l:
922 l.postrelease.append(callback)
922 l.postrelease.append(callback)
923
923
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.)'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            # lock already held by us: just bump its recursion count
            l.lock()
            return l

        def unlock():
            # flush pending store-level state before other processes can
            # acquire the lock
            self.store.write()
            if self._dirtyphases:
                phases.writeroots(self)
            # refresh file cache entries so cached attributes are not
            # considered stale after our own writes
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l
946
946
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            # lock already held by us: just bump its recursion count
            l.lock()
            return l

        def unlock():
            # write the dirstate back and refresh its cache entry so the
            # next reader does not consider our own write stale
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l
967
967
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        Returns the filenode to record in the manifest: a newly added
        revision when the file changed (the filename is then appended to
        *changelist*), otherwise the reused first-parent node.
        manifest1/manifest2 are the parents' manifests; manifest2 is
        non-empty only for merges.
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3 rev1 changes file foo
            # \ / rev2 renames foo to bar and changes it
            # \- 2 -/ rev3 should have bar with all changes and
            # should record that bar descends from
            # bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3 rev4 reverts the content change from rev2
            # \ / merging rev3 and rev4 should use bar@rev2
            # \- 2 --- 4 as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
1047
1047
1048 def commit(self, text="", user=None, date=None, match=None, force=False,
1048 def commit(self, text="", user=None, date=None, match=None, force=False,
1049 editor=False, extra={}):
1049 editor=False, extra={}):
1050 """Add a new revision to current repository.
1050 """Add a new revision to current repository.
1051
1051
1052 Revision information is gathered from the working directory,
1052 Revision information is gathered from the working directory,
1053 match can be used to filter the committed files. If editor is
1053 match can be used to filter the committed files. If editor is
1054 supplied, it is called to get a commit message.
1054 supplied, it is called to get a commit message.
1055 """
1055 """
1056
1056
1057 def fail(f, msg):
1057 def fail(f, msg):
1058 raise util.Abort('%s: %s' % (f, msg))
1058 raise util.Abort('%s: %s' % (f, msg))
1059
1059
1060 if not match:
1060 if not match:
1061 match = matchmod.always(self.root, '')
1061 match = matchmod.always(self.root, '')
1062
1062
1063 if not force:
1063 if not force:
1064 vdirs = []
1064 vdirs = []
1065 match.dir = vdirs.append
1065 match.dir = vdirs.append
1066 match.bad = fail
1066 match.bad = fail
1067
1067
1068 wlock = self.wlock()
1068 wlock = self.wlock()
1069 try:
1069 try:
1070 wctx = self[None]
1070 wctx = self[None]
1071 merge = len(wctx.parents()) > 1
1071 merge = len(wctx.parents()) > 1
1072
1072
1073 if (not force and merge and match and
1073 if (not force and merge and match and
1074 (match.files() or match.anypats())):
1074 (match.files() or match.anypats())):
1075 raise util.Abort(_('cannot partially commit a merge '
1075 raise util.Abort(_('cannot partially commit a merge '
1076 '(do not specify files or patterns)'))
1076 '(do not specify files or patterns)'))
1077
1077
1078 changes = self.status(match=match, clean=force)
1078 changes = self.status(match=match, clean=force)
1079 if force:
1079 if force:
1080 changes[0].extend(changes[6]) # mq may commit unchanged files
1080 changes[0].extend(changes[6]) # mq may commit unchanged files
1081
1081
1082 # check subrepos
1082 # check subrepos
1083 subs = []
1083 subs = []
1084 removedsubs = set()
1084 removedsubs = set()
1085 if '.hgsub' in wctx:
1085 if '.hgsub' in wctx:
1086 # only manage subrepos and .hgsubstate if .hgsub is present
1086 # only manage subrepos and .hgsubstate if .hgsub is present
1087 for p in wctx.parents():
1087 for p in wctx.parents():
1088 removedsubs.update(s for s in p.substate if match(s))
1088 removedsubs.update(s for s in p.substate if match(s))
1089 for s in wctx.substate:
1089 for s in wctx.substate:
1090 removedsubs.discard(s)
1090 removedsubs.discard(s)
1091 if match(s) and wctx.sub(s).dirty():
1091 if match(s) and wctx.sub(s).dirty():
1092 subs.append(s)
1092 subs.append(s)
1093 if (subs or removedsubs):
1093 if (subs or removedsubs):
1094 if (not match('.hgsub') and
1094 if (not match('.hgsub') and
1095 '.hgsub' in (wctx.modified() + wctx.added())):
1095 '.hgsub' in (wctx.modified() + wctx.added())):
1096 raise util.Abort(
1096 raise util.Abort(
1097 _("can't commit subrepos without .hgsub"))
1097 _("can't commit subrepos without .hgsub"))
1098 if '.hgsubstate' not in changes[0]:
1098 if '.hgsubstate' not in changes[0]:
1099 changes[0].insert(0, '.hgsubstate')
1099 changes[0].insert(0, '.hgsubstate')
1100 if '.hgsubstate' in changes[2]:
1100 if '.hgsubstate' in changes[2]:
1101 changes[2].remove('.hgsubstate')
1101 changes[2].remove('.hgsubstate')
1102 elif '.hgsub' in changes[2]:
1102 elif '.hgsub' in changes[2]:
1103 # clean up .hgsubstate when .hgsub is removed
1103 # clean up .hgsubstate when .hgsub is removed
1104 if ('.hgsubstate' in wctx and
1104 if ('.hgsubstate' in wctx and
1105 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1105 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1106 changes[2].insert(0, '.hgsubstate')
1106 changes[2].insert(0, '.hgsubstate')
1107
1107
1108 if subs and not self.ui.configbool('ui', 'commitsubrepos', False):
1108 if subs and not self.ui.configbool('ui', 'commitsubrepos', False):
1109 changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
1109 changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
1110 if changedsubs:
1110 if changedsubs:
1111 raise util.Abort(_("uncommitted changes in subrepo %s")
1111 raise util.Abort(_("uncommitted changes in subrepo %s")
1112 % changedsubs[0],
1112 % changedsubs[0],
1113 hint=_("use --subrepos for recursive commit"))
1113 hint=_("use --subrepos for recursive commit"))
1114
1114
1115 # make sure all explicit patterns are matched
1115 # make sure all explicit patterns are matched
1116 if not force and match.files():
1116 if not force and match.files():
1117 matched = set(changes[0] + changes[1] + changes[2])
1117 matched = set(changes[0] + changes[1] + changes[2])
1118
1118
1119 for f in match.files():
1119 for f in match.files():
1120 if f == '.' or f in matched or f in wctx.substate:
1120 if f == '.' or f in matched or f in wctx.substate:
1121 continue
1121 continue
1122 if f in changes[3]: # missing
1122 if f in changes[3]: # missing
1123 fail(f, _('file not found!'))
1123 fail(f, _('file not found!'))
1124 if f in vdirs: # visited directory
1124 if f in vdirs: # visited directory
1125 d = f + '/'
1125 d = f + '/'
1126 for mf in matched:
1126 for mf in matched:
1127 if mf.startswith(d):
1127 if mf.startswith(d):
1128 break
1128 break
1129 else:
1129 else:
1130 fail(f, _("no match under directory!"))
1130 fail(f, _("no match under directory!"))
1131 elif f not in self.dirstate:
1131 elif f not in self.dirstate:
1132 fail(f, _("file not tracked!"))
1132 fail(f, _("file not tracked!"))
1133
1133
1134 if (not force and not extra.get("close") and not merge
1134 if (not force and not extra.get("close") and not merge
1135 and not (changes[0] or changes[1] or changes[2])
1135 and not (changes[0] or changes[1] or changes[2])
1136 and wctx.branch() == wctx.p1().branch()):
1136 and wctx.branch() == wctx.p1().branch()):
1137 return None
1137 return None
1138
1138
1139 ms = mergemod.mergestate(self)
1139 ms = mergemod.mergestate(self)
1140 for f in changes[0]:
1140 for f in changes[0]:
1141 if f in ms and ms[f] == 'u':
1141 if f in ms and ms[f] == 'u':
1142 raise util.Abort(_("unresolved merge conflicts "
1142 raise util.Abort(_("unresolved merge conflicts "
1143 "(see hg help resolve)"))
1143 "(see hg help resolve)"))
1144
1144
1145 cctx = context.workingctx(self, text, user, date, extra, changes)
1145 cctx = context.workingctx(self, text, user, date, extra, changes)
1146 if editor:
1146 if editor:
1147 cctx._text = editor(self, cctx, subs)
1147 cctx._text = editor(self, cctx, subs)
1148 edited = (text != cctx._text)
1148 edited = (text != cctx._text)
1149
1149
1150 # commit subs
1150 # commit subs
1151 if subs or removedsubs:
1151 if subs or removedsubs:
1152 state = wctx.substate.copy()
1152 state = wctx.substate.copy()
1153 for s in sorted(subs):
1153 for s in sorted(subs):
1154 sub = wctx.sub(s)
1154 sub = wctx.sub(s)
1155 self.ui.status(_('committing subrepository %s\n') %
1155 self.ui.status(_('committing subrepository %s\n') %
1156 subrepo.subrelpath(sub))
1156 subrepo.subrelpath(sub))
1157 sr = sub.commit(cctx._text, user, date)
1157 sr = sub.commit(cctx._text, user, date)
1158 state[s] = (state[s][0], sr)
1158 state[s] = (state[s][0], sr)
1159 subrepo.writestate(self, state)
1159 subrepo.writestate(self, state)
1160
1160
1161 # Save commit message in case this transaction gets rolled back
1161 # Save commit message in case this transaction gets rolled back
1162 # (e.g. by a pretxncommit hook). Leave the content alone on
1162 # (e.g. by a pretxncommit hook). Leave the content alone on
1163 # the assumption that the user will use the same editor again.
1163 # the assumption that the user will use the same editor again.
1164 msgfn = self.savecommitmessage(cctx._text)
1164 msgfn = self.savecommitmessage(cctx._text)
1165
1165
1166 p1, p2 = self.dirstate.parents()
1166 p1, p2 = self.dirstate.parents()
1167 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1167 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1168 try:
1168 try:
1169 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
1169 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
1170 ret = self.commitctx(cctx, True)
1170 ret = self.commitctx(cctx, True)
1171 except:
1171 except:
1172 if edited:
1172 if edited:
1173 self.ui.write(
1173 self.ui.write(
1174 _('note: commit message saved in %s\n') % msgfn)
1174 _('note: commit message saved in %s\n') % msgfn)
1175 raise
1175 raise
1176
1176
1177 # update bookmarks, dirstate and mergestate
1177 # update bookmarks, dirstate and mergestate
1178 bookmarks.update(self, p1, ret)
1178 bookmarks.update(self, p1, ret)
1179 for f in changes[0] + changes[1]:
1179 for f in changes[0] + changes[1]:
1180 self.dirstate.normal(f)
1180 self.dirstate.normal(f)
1181 for f in changes[2]:
1181 for f in changes[2]:
1182 self.dirstate.drop(f)
1182 self.dirstate.drop(f)
1183 self.dirstate.setparents(ret)
1183 self.dirstate.setparents(ret)
1184 ms.reset()
1184 ms.reset()
1185 finally:
1185 finally:
1186 wlock.release()
1186 wlock.release()
1187
1187
1188 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
1188 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
1189 return ret
1189 return ret
1190
1190
1191 def commitctx(self, ctx, error=False):
1191 def commitctx(self, ctx, error=False):
1192 """Add a new revision to current repository.
1192 """Add a new revision to current repository.
1193 Revision information is passed via the context argument.
1193 Revision information is passed via the context argument.
1194 """
1194 """
1195
1195
1196 tr = lock = None
1196 tr = lock = None
1197 removed = list(ctx.removed())
1197 removed = list(ctx.removed())
1198 p1, p2 = ctx.p1(), ctx.p2()
1198 p1, p2 = ctx.p1(), ctx.p2()
1199 user = ctx.user()
1199 user = ctx.user()
1200
1200
1201 lock = self.lock()
1201 lock = self.lock()
1202 try:
1202 try:
1203 tr = self.transaction("commit")
1203 tr = self.transaction("commit")
1204 trp = weakref.proxy(tr)
1204 trp = weakref.proxy(tr)
1205
1205
1206 if ctx.files():
1206 if ctx.files():
1207 m1 = p1.manifest().copy()
1207 m1 = p1.manifest().copy()
1208 m2 = p2.manifest()
1208 m2 = p2.manifest()
1209
1209
1210 # check in files
1210 # check in files
1211 new = {}
1211 new = {}
1212 changed = []
1212 changed = []
1213 linkrev = len(self)
1213 linkrev = len(self)
1214 for f in sorted(ctx.modified() + ctx.added()):
1214 for f in sorted(ctx.modified() + ctx.added()):
1215 self.ui.note(f + "\n")
1215 self.ui.note(f + "\n")
1216 try:
1216 try:
1217 fctx = ctx[f]
1217 fctx = ctx[f]
1218 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1218 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1219 changed)
1219 changed)
1220 m1.set(f, fctx.flags())
1220 m1.set(f, fctx.flags())
1221 except OSError, inst:
1221 except OSError, inst:
1222 self.ui.warn(_("trouble committing %s!\n") % f)
1222 self.ui.warn(_("trouble committing %s!\n") % f)
1223 raise
1223 raise
1224 except IOError, inst:
1224 except IOError, inst:
1225 errcode = getattr(inst, 'errno', errno.ENOENT)
1225 errcode = getattr(inst, 'errno', errno.ENOENT)
1226 if error or errcode and errcode != errno.ENOENT:
1226 if error or errcode and errcode != errno.ENOENT:
1227 self.ui.warn(_("trouble committing %s!\n") % f)
1227 self.ui.warn(_("trouble committing %s!\n") % f)
1228 raise
1228 raise
1229 else:
1229 else:
1230 removed.append(f)
1230 removed.append(f)
1231
1231
1232 # update manifest
1232 # update manifest
1233 m1.update(new)
1233 m1.update(new)
1234 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1234 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1235 drop = [f for f in removed if f in m1]
1235 drop = [f for f in removed if f in m1]
1236 for f in drop:
1236 for f in drop:
1237 del m1[f]
1237 del m1[f]
1238 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1238 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1239 p2.manifestnode(), (new, drop))
1239 p2.manifestnode(), (new, drop))
1240 files = changed + removed
1240 files = changed + removed
1241 else:
1241 else:
1242 mn = p1.manifestnode()
1242 mn = p1.manifestnode()
1243 files = []
1243 files = []
1244
1244
1245 # update changelog
1245 # update changelog
1246 self.changelog.delayupdate()
1246 self.changelog.delayupdate()
1247 n = self.changelog.add(mn, files, ctx.description(),
1247 n = self.changelog.add(mn, files, ctx.description(),
1248 trp, p1.node(), p2.node(),
1248 trp, p1.node(), p2.node(),
1249 user, ctx.date(), ctx.extra().copy())
1249 user, ctx.date(), ctx.extra().copy())
1250 p = lambda: self.changelog.writepending() and self.root or ""
1250 p = lambda: self.changelog.writepending() and self.root or ""
1251 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1251 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1252 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1252 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1253 parent2=xp2, pending=p)
1253 parent2=xp2, pending=p)
1254 self.changelog.finalize(trp)
1254 self.changelog.finalize(trp)
1255 # set the new commit is proper phase
1255 # set the new commit is proper phase
1256 targetphase = self.ui.configint('phases', 'new-commit', 1)
1256 targetphase = self.ui.configint('phases', 'new-commit',
1257 phases.draft)
1257 if targetphase:
1258 if targetphase:
1258 # retract boundary do not alter parent changeset.
1259 # retract boundary do not alter parent changeset.
1259 # if a parent have higher the resulting phase will
1260 # if a parent have higher the resulting phase will
1260 # be compliant anyway
1261 # be compliant anyway
1261 #
1262 #
1262 # if minimal phase was 0 we don't need to retract anything
1263 # if minimal phase was 0 we don't need to retract anything
1263 phases.retractboundary(self, targetphase, [n])
1264 phases.retractboundary(self, targetphase, [n])
1264 tr.close()
1265 tr.close()
1265
1266
1266 if self._branchcache:
1267 if self._branchcache:
1267 self.updatebranchcache()
1268 self.updatebranchcache()
1268 return n
1269 return n
1269 finally:
1270 finally:
1270 if tr:
1271 if tr:
1271 tr.release()
1272 tr.release()
1272 lock.release()
1273 lock.release()
1273
1274
1274 def destroyed(self):
1275 def destroyed(self):
1275 '''Inform the repository that nodes have been destroyed.
1276 '''Inform the repository that nodes have been destroyed.
1276 Intended for use by strip and rollback, so there's a common
1277 Intended for use by strip and rollback, so there's a common
1277 place for anything that has to be done after destroying history.'''
1278 place for anything that has to be done after destroying history.'''
1278 # XXX it might be nice if we could take the list of destroyed
1279 # XXX it might be nice if we could take the list of destroyed
1279 # nodes, but I don't see an easy way for rollback() to do that
1280 # nodes, but I don't see an easy way for rollback() to do that
1280
1281
1281 # Ensure the persistent tag cache is updated. Doing it now
1282 # Ensure the persistent tag cache is updated. Doing it now
1282 # means that the tag cache only has to worry about destroyed
1283 # means that the tag cache only has to worry about destroyed
1283 # heads immediately after a strip/rollback. That in turn
1284 # heads immediately after a strip/rollback. That in turn
1284 # guarantees that "cachetip == currenttip" (comparing both rev
1285 # guarantees that "cachetip == currenttip" (comparing both rev
1285 # and node) always means no nodes have been added or destroyed.
1286 # and node) always means no nodes have been added or destroyed.
1286
1287
1287 # XXX this is suboptimal when qrefresh'ing: we strip the current
1288 # XXX this is suboptimal when qrefresh'ing: we strip the current
1288 # head, refresh the tag cache, then immediately add a new head.
1289 # head, refresh the tag cache, then immediately add a new head.
1289 # But I think doing it this way is necessary for the "instant
1290 # But I think doing it this way is necessary for the "instant
1290 # tag cache retrieval" case to work.
1291 # tag cache retrieval" case to work.
1291 self.invalidatecaches()
1292 self.invalidatecaches()
1292
1293
1293 def walk(self, match, node=None):
1294 def walk(self, match, node=None):
1294 '''
1295 '''
1295 walk recursively through the directory tree or a given
1296 walk recursively through the directory tree or a given
1296 changeset, finding all files matched by the match
1297 changeset, finding all files matched by the match
1297 function
1298 function
1298 '''
1299 '''
1299 return self[node].walk(match)
1300 return self[node].walk(match)
1300
1301
1301 def status(self, node1='.', node2=None, match=None,
1302 def status(self, node1='.', node2=None, match=None,
1302 ignored=False, clean=False, unknown=False,
1303 ignored=False, clean=False, unknown=False,
1303 listsubrepos=False):
1304 listsubrepos=False):
1304 """return status of files between two nodes or node and working directory
1305 """return status of files between two nodes or node and working directory
1305
1306
1306 If node1 is None, use the first dirstate parent instead.
1307 If node1 is None, use the first dirstate parent instead.
1307 If node2 is None, compare node1 with working directory.
1308 If node2 is None, compare node1 with working directory.
1308 """
1309 """
1309
1310
1310 def mfmatches(ctx):
1311 def mfmatches(ctx):
1311 mf = ctx.manifest().copy()
1312 mf = ctx.manifest().copy()
1312 for fn in mf.keys():
1313 for fn in mf.keys():
1313 if not match(fn):
1314 if not match(fn):
1314 del mf[fn]
1315 del mf[fn]
1315 return mf
1316 return mf
1316
1317
1317 if isinstance(node1, context.changectx):
1318 if isinstance(node1, context.changectx):
1318 ctx1 = node1
1319 ctx1 = node1
1319 else:
1320 else:
1320 ctx1 = self[node1]
1321 ctx1 = self[node1]
1321 if isinstance(node2, context.changectx):
1322 if isinstance(node2, context.changectx):
1322 ctx2 = node2
1323 ctx2 = node2
1323 else:
1324 else:
1324 ctx2 = self[node2]
1325 ctx2 = self[node2]
1325
1326
1326 working = ctx2.rev() is None
1327 working = ctx2.rev() is None
1327 parentworking = working and ctx1 == self['.']
1328 parentworking = working and ctx1 == self['.']
1328 match = match or matchmod.always(self.root, self.getcwd())
1329 match = match or matchmod.always(self.root, self.getcwd())
1329 listignored, listclean, listunknown = ignored, clean, unknown
1330 listignored, listclean, listunknown = ignored, clean, unknown
1330
1331
1331 # load earliest manifest first for caching reasons
1332 # load earliest manifest first for caching reasons
1332 if not working and ctx2.rev() < ctx1.rev():
1333 if not working and ctx2.rev() < ctx1.rev():
1333 ctx2.manifest()
1334 ctx2.manifest()
1334
1335
1335 if not parentworking:
1336 if not parentworking:
1336 def bad(f, msg):
1337 def bad(f, msg):
1337 if f not in ctx1:
1338 if f not in ctx1:
1338 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1339 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1339 match.bad = bad
1340 match.bad = bad
1340
1341
1341 if working: # we need to scan the working dir
1342 if working: # we need to scan the working dir
1342 subrepos = []
1343 subrepos = []
1343 if '.hgsub' in self.dirstate:
1344 if '.hgsub' in self.dirstate:
1344 subrepos = ctx2.substate.keys()
1345 subrepos = ctx2.substate.keys()
1345 s = self.dirstate.status(match, subrepos, listignored,
1346 s = self.dirstate.status(match, subrepos, listignored,
1346 listclean, listunknown)
1347 listclean, listunknown)
1347 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1348 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1348
1349
1349 # check for any possibly clean files
1350 # check for any possibly clean files
1350 if parentworking and cmp:
1351 if parentworking and cmp:
1351 fixup = []
1352 fixup = []
1352 # do a full compare of any files that might have changed
1353 # do a full compare of any files that might have changed
1353 for f in sorted(cmp):
1354 for f in sorted(cmp):
1354 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1355 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1355 or ctx1[f].cmp(ctx2[f])):
1356 or ctx1[f].cmp(ctx2[f])):
1356 modified.append(f)
1357 modified.append(f)
1357 else:
1358 else:
1358 fixup.append(f)
1359 fixup.append(f)
1359
1360
1360 # update dirstate for files that are actually clean
1361 # update dirstate for files that are actually clean
1361 if fixup:
1362 if fixup:
1362 if listclean:
1363 if listclean:
1363 clean += fixup
1364 clean += fixup
1364
1365
1365 try:
1366 try:
1366 # updating the dirstate is optional
1367 # updating the dirstate is optional
1367 # so we don't wait on the lock
1368 # so we don't wait on the lock
1368 wlock = self.wlock(False)
1369 wlock = self.wlock(False)
1369 try:
1370 try:
1370 for f in fixup:
1371 for f in fixup:
1371 self.dirstate.normal(f)
1372 self.dirstate.normal(f)
1372 finally:
1373 finally:
1373 wlock.release()
1374 wlock.release()
1374 except error.LockError:
1375 except error.LockError:
1375 pass
1376 pass
1376
1377
1377 if not parentworking:
1378 if not parentworking:
1378 mf1 = mfmatches(ctx1)
1379 mf1 = mfmatches(ctx1)
1379 if working:
1380 if working:
1380 # we are comparing working dir against non-parent
1381 # we are comparing working dir against non-parent
1381 # generate a pseudo-manifest for the working dir
1382 # generate a pseudo-manifest for the working dir
1382 mf2 = mfmatches(self['.'])
1383 mf2 = mfmatches(self['.'])
1383 for f in cmp + modified + added:
1384 for f in cmp + modified + added:
1384 mf2[f] = None
1385 mf2[f] = None
1385 mf2.set(f, ctx2.flags(f))
1386 mf2.set(f, ctx2.flags(f))
1386 for f in removed:
1387 for f in removed:
1387 if f in mf2:
1388 if f in mf2:
1388 del mf2[f]
1389 del mf2[f]
1389 else:
1390 else:
1390 # we are comparing two revisions
1391 # we are comparing two revisions
1391 deleted, unknown, ignored = [], [], []
1392 deleted, unknown, ignored = [], [], []
1392 mf2 = mfmatches(ctx2)
1393 mf2 = mfmatches(ctx2)
1393
1394
1394 modified, added, clean = [], [], []
1395 modified, added, clean = [], [], []
1395 for fn in mf2:
1396 for fn in mf2:
1396 if fn in mf1:
1397 if fn in mf1:
1397 if (fn not in deleted and
1398 if (fn not in deleted and
1398 (mf1.flags(fn) != mf2.flags(fn) or
1399 (mf1.flags(fn) != mf2.flags(fn) or
1399 (mf1[fn] != mf2[fn] and
1400 (mf1[fn] != mf2[fn] and
1400 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1401 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1401 modified.append(fn)
1402 modified.append(fn)
1402 elif listclean:
1403 elif listclean:
1403 clean.append(fn)
1404 clean.append(fn)
1404 del mf1[fn]
1405 del mf1[fn]
1405 elif fn not in deleted:
1406 elif fn not in deleted:
1406 added.append(fn)
1407 added.append(fn)
1407 removed = mf1.keys()
1408 removed = mf1.keys()
1408
1409
1409 if working and modified and not self.dirstate._checklink:
1410 if working and modified and not self.dirstate._checklink:
1410 # Symlink placeholders may get non-symlink-like contents
1411 # Symlink placeholders may get non-symlink-like contents
1411 # via user error or dereferencing by NFS or Samba servers,
1412 # via user error or dereferencing by NFS or Samba servers,
1412 # so we filter out any placeholders that don't look like a
1413 # so we filter out any placeholders that don't look like a
1413 # symlink
1414 # symlink
1414 sane = []
1415 sane = []
1415 for f in modified:
1416 for f in modified:
1416 if ctx2.flags(f) == 'l':
1417 if ctx2.flags(f) == 'l':
1417 d = ctx2[f].data()
1418 d = ctx2[f].data()
1418 if len(d) >= 1024 or '\n' in d or util.binary(d):
1419 if len(d) >= 1024 or '\n' in d or util.binary(d):
1419 self.ui.debug('ignoring suspect symlink placeholder'
1420 self.ui.debug('ignoring suspect symlink placeholder'
1420 ' "%s"\n' % f)
1421 ' "%s"\n' % f)
1421 continue
1422 continue
1422 sane.append(f)
1423 sane.append(f)
1423 modified = sane
1424 modified = sane
1424
1425
1425 r = modified, added, removed, deleted, unknown, ignored, clean
1426 r = modified, added, removed, deleted, unknown, ignored, clean
1426
1427
1427 if listsubrepos:
1428 if listsubrepos:
1428 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1429 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1429 if working:
1430 if working:
1430 rev2 = None
1431 rev2 = None
1431 else:
1432 else:
1432 rev2 = ctx2.substate[subpath][1]
1433 rev2 = ctx2.substate[subpath][1]
1433 try:
1434 try:
1434 submatch = matchmod.narrowmatcher(subpath, match)
1435 submatch = matchmod.narrowmatcher(subpath, match)
1435 s = sub.status(rev2, match=submatch, ignored=listignored,
1436 s = sub.status(rev2, match=submatch, ignored=listignored,
1436 clean=listclean, unknown=listunknown,
1437 clean=listclean, unknown=listunknown,
1437 listsubrepos=True)
1438 listsubrepos=True)
1438 for rfiles, sfiles in zip(r, s):
1439 for rfiles, sfiles in zip(r, s):
1439 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1440 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1440 except error.LookupError:
1441 except error.LookupError:
1441 self.ui.status(_("skipping missing subrepository: %s\n")
1442 self.ui.status(_("skipping missing subrepository: %s\n")
1442 % subpath)
1443 % subpath)
1443
1444
1444 for l in r:
1445 for l in r:
1445 l.sort()
1446 l.sort()
1446 return r
1447 return r
1447
1448
1448 def heads(self, start=None):
1449 def heads(self, start=None):
1449 heads = self.changelog.heads(start)
1450 heads = self.changelog.heads(start)
1450 # sort the output in rev descending order
1451 # sort the output in rev descending order
1451 return sorted(heads, key=self.changelog.rev, reverse=True)
1452 return sorted(heads, key=self.changelog.rev, reverse=True)
1452
1453
1453 def branchheads(self, branch=None, start=None, closed=False):
1454 def branchheads(self, branch=None, start=None, closed=False):
1454 '''return a (possibly filtered) list of heads for the given branch
1455 '''return a (possibly filtered) list of heads for the given branch
1455
1456
1456 Heads are returned in topological order, from newest to oldest.
1457 Heads are returned in topological order, from newest to oldest.
1457 If branch is None, use the dirstate branch.
1458 If branch is None, use the dirstate branch.
1458 If start is not None, return only heads reachable from start.
1459 If start is not None, return only heads reachable from start.
1459 If closed is True, return heads that are marked as closed as well.
1460 If closed is True, return heads that are marked as closed as well.
1460 '''
1461 '''
1461 if branch is None:
1462 if branch is None:
1462 branch = self[None].branch()
1463 branch = self[None].branch()
1463 branches = self.branchmap()
1464 branches = self.branchmap()
1464 if branch not in branches:
1465 if branch not in branches:
1465 return []
1466 return []
1466 # the cache returns heads ordered lowest to highest
1467 # the cache returns heads ordered lowest to highest
1467 bheads = list(reversed(branches[branch]))
1468 bheads = list(reversed(branches[branch]))
1468 if start is not None:
1469 if start is not None:
1469 # filter out the heads that cannot be reached from startrev
1470 # filter out the heads that cannot be reached from startrev
1470 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1471 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1471 bheads = [h for h in bheads if h in fbheads]
1472 bheads = [h for h in bheads if h in fbheads]
1472 if not closed:
1473 if not closed:
1473 bheads = [h for h in bheads if
1474 bheads = [h for h in bheads if
1474 ('close' not in self.changelog.read(h)[5])]
1475 ('close' not in self.changelog.read(h)[5])]
1475 return bheads
1476 return bheads
1476
1477
1477 def branches(self, nodes):
1478 def branches(self, nodes):
1478 if not nodes:
1479 if not nodes:
1479 nodes = [self.changelog.tip()]
1480 nodes = [self.changelog.tip()]
1480 b = []
1481 b = []
1481 for n in nodes:
1482 for n in nodes:
1482 t = n
1483 t = n
1483 while True:
1484 while True:
1484 p = self.changelog.parents(n)
1485 p = self.changelog.parents(n)
1485 if p[1] != nullid or p[0] == nullid:
1486 if p[1] != nullid or p[0] == nullid:
1486 b.append((t, n, p[0], p[1]))
1487 b.append((t, n, p[0], p[1]))
1487 break
1488 break
1488 n = p[0]
1489 n = p[0]
1489 return b
1490 return b
1490
1491
1491 def between(self, pairs):
1492 def between(self, pairs):
1492 r = []
1493 r = []
1493
1494
1494 for top, bottom in pairs:
1495 for top, bottom in pairs:
1495 n, l, i = top, [], 0
1496 n, l, i = top, [], 0
1496 f = 1
1497 f = 1
1497
1498
1498 while n != bottom and n != nullid:
1499 while n != bottom and n != nullid:
1499 p = self.changelog.parents(n)[0]
1500 p = self.changelog.parents(n)[0]
1500 if i == f:
1501 if i == f:
1501 l.append(n)
1502 l.append(n)
1502 f = f * 2
1503 f = f * 2
1503 n = p
1504 n = p
1504 i += 1
1505 i += 1
1505
1506
1506 r.append(l)
1507 r.append(l)
1507
1508
1508 return r
1509 return r
1509
1510
1510 def pull(self, remote, heads=None, force=False):
1511 def pull(self, remote, heads=None, force=False):
1511 lock = self.lock()
1512 lock = self.lock()
1512 try:
1513 try:
1513 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1514 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1514 force=force)
1515 force=force)
1515 common, fetch, rheads = tmp
1516 common, fetch, rheads = tmp
1516 if not fetch:
1517 if not fetch:
1517 self.ui.status(_("no changes found\n"))
1518 self.ui.status(_("no changes found\n"))
1518 added = []
1519 added = []
1519 result = 0
1520 result = 0
1520 else:
1521 else:
1521 if heads is None and list(common) == [nullid]:
1522 if heads is None and list(common) == [nullid]:
1522 self.ui.status(_("requesting all changes\n"))
1523 self.ui.status(_("requesting all changes\n"))
1523 elif heads is None and remote.capable('changegroupsubset'):
1524 elif heads is None and remote.capable('changegroupsubset'):
1524 # issue1320, avoid a race if remote changed after discovery
1525 # issue1320, avoid a race if remote changed after discovery
1525 heads = rheads
1526 heads = rheads
1526
1527
1527 if remote.capable('getbundle'):
1528 if remote.capable('getbundle'):
1528 cg = remote.getbundle('pull', common=common,
1529 cg = remote.getbundle('pull', common=common,
1529 heads=heads or rheads)
1530 heads=heads or rheads)
1530 elif heads is None:
1531 elif heads is None:
1531 cg = remote.changegroup(fetch, 'pull')
1532 cg = remote.changegroup(fetch, 'pull')
1532 elif not remote.capable('changegroupsubset'):
1533 elif not remote.capable('changegroupsubset'):
1533 raise util.Abort(_("partial pull cannot be done because "
1534 raise util.Abort(_("partial pull cannot be done because "
1534 "other repository doesn't support "
1535 "other repository doesn't support "
1535 "changegroupsubset."))
1536 "changegroupsubset."))
1536 else:
1537 else:
1537 cg = remote.changegroupsubset(fetch, heads, 'pull')
1538 cg = remote.changegroupsubset(fetch, heads, 'pull')
1538 clstart = len(self.changelog)
1539 clstart = len(self.changelog)
1539 result = self.addchangegroup(cg, 'pull', remote.url())
1540 result = self.addchangegroup(cg, 'pull', remote.url())
1540 clend = len(self.changelog)
1541 clend = len(self.changelog)
1541 added = [self.changelog.node(r) for r in xrange(clstart, clend)]
1542 added = [self.changelog.node(r) for r in xrange(clstart, clend)]
1542
1543
1543
1544
1544 # Get remote phases data from remote
1545 # Get remote phases data from remote
1545 remotephases = remote.listkeys('phases')
1546 remotephases = remote.listkeys('phases')
1546 publishing = bool(remotephases.get('publishing', False))
1547 publishing = bool(remotephases.get('publishing', False))
1547 if remotephases and not publishing:
1548 if remotephases and not publishing:
1548 # remote is new and unpublishing
1549 # remote is new and unpublishing
1549 subset = common + added
1550 subset = common + added
1550 rheads, rroots = phases.analyzeremotephases(self, subset,
1551 rheads, rroots = phases.analyzeremotephases(self, subset,
1551 remotephases)
1552 remotephases)
1552 for phase, boundary in enumerate(rheads):
1553 for phase, boundary in enumerate(rheads):
1553 phases.advanceboundary(self, phase, boundary)
1554 phases.advanceboundary(self, phase, boundary)
1554 else:
1555 else:
1555 # Remote is old or publishing all common changesets
1556 # Remote is old or publishing all common changesets
1556 # should be seen as public
1557 # should be seen as public
1557 phases.advanceboundary(self, 0, common + added)
1558 phases.advanceboundary(self, phases.public, common + added)
1558 finally:
1559 finally:
1559 lock.release()
1560 lock.release()
1560
1561
1561 return result
1562 return result
1562
1563
1563 def checkpush(self, force, revs):
1564 def checkpush(self, force, revs):
1564 """Extensions can override this function if additional checks have
1565 """Extensions can override this function if additional checks have
1565 to be performed before pushing, or call it if they override push
1566 to be performed before pushing, or call it if they override push
1566 command.
1567 command.
1567 """
1568 """
1568 pass
1569 pass
1569
1570
1570 def push(self, remote, force=False, revs=None, newbranch=False):
1571 def push(self, remote, force=False, revs=None, newbranch=False):
1571 '''Push outgoing changesets (limited by revs) from the current
1572 '''Push outgoing changesets (limited by revs) from the current
1572 repository to remote. Return an integer:
1573 repository to remote. Return an integer:
1573 - 0 means HTTP error *or* nothing to push
1574 - 0 means HTTP error *or* nothing to push
1574 - 1 means we pushed and remote head count is unchanged *or*
1575 - 1 means we pushed and remote head count is unchanged *or*
1575 we have outgoing changesets but refused to push
1576 we have outgoing changesets but refused to push
1576 - other values as described by addchangegroup()
1577 - other values as described by addchangegroup()
1577 '''
1578 '''
1578 # there are two ways to push to remote repo:
1579 # there are two ways to push to remote repo:
1579 #
1580 #
1580 # addchangegroup assumes local user can lock remote
1581 # addchangegroup assumes local user can lock remote
1581 # repo (local filesystem, old ssh servers).
1582 # repo (local filesystem, old ssh servers).
1582 #
1583 #
1583 # unbundle assumes local user cannot lock remote repo (new ssh
1584 # unbundle assumes local user cannot lock remote repo (new ssh
1584 # servers, http servers).
1585 # servers, http servers).
1585
1586
1586 self.checkpush(force, revs)
1587 self.checkpush(force, revs)
1587 lock = None
1588 lock = None
1588 unbundle = remote.capable('unbundle')
1589 unbundle = remote.capable('unbundle')
1589 if not unbundle:
1590 if not unbundle:
1590 lock = remote.lock()
1591 lock = remote.lock()
1591 try:
1592 try:
1592 # get local lock as we might write phase data
1593 # get local lock as we might write phase data
1593 locallock = self.lock()
1594 locallock = self.lock()
1594 try:
1595 try:
1595 cg, remote_heads, fut = discovery.prepush(self, remote, force,
1596 cg, remote_heads, fut = discovery.prepush(self, remote, force,
1596 revs, newbranch)
1597 revs, newbranch)
1597 ret = remote_heads
1598 ret = remote_heads
1598 # create a callback for addchangegroup.
1599 # create a callback for addchangegroup.
1599 # If will be used branch of the conditionnal too.
1600 # If will be used branch of the conditionnal too.
1600 if cg is not None:
1601 if cg is not None:
1601 if unbundle:
1602 if unbundle:
1602 # local repo finds heads on server, finds out what
1603 # local repo finds heads on server, finds out what
1603 # revs it must push. once revs transferred, if server
1604 # revs it must push. once revs transferred, if server
1604 # finds it has different heads (someone else won
1605 # finds it has different heads (someone else won
1605 # commit/push race), server aborts.
1606 # commit/push race), server aborts.
1606 if force:
1607 if force:
1607 remote_heads = ['force']
1608 remote_heads = ['force']
1608 # ssh: return remote's addchangegroup()
1609 # ssh: return remote's addchangegroup()
1609 # http: return remote's addchangegroup() or 0 for error
1610 # http: return remote's addchangegroup() or 0 for error
1610 ret = remote.unbundle(cg, remote_heads, 'push')
1611 ret = remote.unbundle(cg, remote_heads, 'push')
1611 else:
1612 else:
1612 # we return an integer indicating remote head count change
1613 # we return an integer indicating remote head count change
1613 ret = remote.addchangegroup(cg, 'push', self.url())
1614 ret = remote.addchangegroup(cg, 'push', self.url())
1614
1615
1615 # even when we don't push, exchanging phase data is useful
1616 # even when we don't push, exchanging phase data is useful
1616 remotephases = remote.listkeys('phases')
1617 remotephases = remote.listkeys('phases')
1617 if not remotephases: # old server or public only repo
1618 if not remotephases: # old server or public only repo
1618 phases.advanceboundary(self, 0, fut)
1619 phases.advanceboundary(self, phases.public, fut)
1619 # don't push any phase data as there is nothing to push
1620 # don't push any phase data as there is nothing to push
1620 else:
1621 else:
1621 ana = phases.analyzeremotephases(self, fut, remotephases)
1622 ana = phases.analyzeremotephases(self, fut, remotephases)
1622 rheads, rroots = ana
1623 rheads, rroots = ana
1623 ### Apply remote phase on local
1624 ### Apply remote phase on local
1624 if remotephases.get('publishing', False):
1625 if remotephases.get('publishing', False):
1625 phases.advanceboundary(self, 0, fut)
1626 phases.advanceboundary(self, phases.public, fut)
1626 else: # publish = False
1627 else: # publish = False
1627 for phase, rpheads in enumerate(rheads):
1628 for phase, rpheads in enumerate(rheads):
1628 phases.advanceboundary(self, phase, rpheads)
1629 phases.advanceboundary(self, phase, rpheads)
1629 ### Apply local phase on remote
1630 ### Apply local phase on remote
1630 #
1631 #
1631 # XXX If push failed we should use strict common and not
1632 # XXX If push failed we should use strict common and not
1632 # future to avoir pushing phase data on unknown changeset.
1633 # future to avoir pushing phase data on unknown changeset.
1633 # This is to done later.
1634 # This is to done later.
1634 futctx = [self[n] for n in fut if n != nullid]
1635 futctx = [self[n] for n in fut if n != nullid]
1635 for phase in phases.trackedphases[::-1]:
1636 for phase in phases.trackedphases[::-1]:
1636 prevphase = phase -1
1637 prevphase = phase -1
1637 # get all candidate for head in previous phase
1638 # get all candidate for head in previous phase
1638 inprev = [ctx for ctx in futctx
1639 inprev = [ctx for ctx in futctx
1639 if ctx.phase() == prevphase]
1640 if ctx.phase() == prevphase]
1640 for newremotehead in self.set('heads(%ld & (%ln::))',
1641 for newremotehead in self.set('heads(%ld & (%ln::))',
1641 inprev, rroots[phase]):
1642 inprev, rroots[phase]):
1642 r = remote.pushkey('phases',
1643 r = remote.pushkey('phases',
1643 newremotehead.hex(),
1644 newremotehead.hex(),
1644 str(phase), str(prevphase))
1645 str(phase), str(prevphase))
1645 if not r:
1646 if not r:
1646 self.ui.warn(_('updating phase of %s'
1647 self.ui.warn(_('updating phase of %s'
1647 'to %s failed!\n')
1648 'to %s failed!\n')
1648 % (newremotehead, prevphase))
1649 % (newremotehead, prevphase))
1649 finally:
1650 finally:
1650 locallock.release()
1651 locallock.release()
1651 finally:
1652 finally:
1652 if lock is not None:
1653 if lock is not None:
1653 lock.release()
1654 lock.release()
1654
1655
1655 self.ui.debug("checking for updated bookmarks\n")
1656 self.ui.debug("checking for updated bookmarks\n")
1656 rb = remote.listkeys('bookmarks')
1657 rb = remote.listkeys('bookmarks')
1657 for k in rb.keys():
1658 for k in rb.keys():
1658 if k in self._bookmarks:
1659 if k in self._bookmarks:
1659 nr, nl = rb[k], hex(self._bookmarks[k])
1660 nr, nl = rb[k], hex(self._bookmarks[k])
1660 if nr in self:
1661 if nr in self:
1661 cr = self[nr]
1662 cr = self[nr]
1662 cl = self[nl]
1663 cl = self[nl]
1663 if cl in cr.descendants():
1664 if cl in cr.descendants():
1664 r = remote.pushkey('bookmarks', k, nr, nl)
1665 r = remote.pushkey('bookmarks', k, nr, nl)
1665 if r:
1666 if r:
1666 self.ui.status(_("updating bookmark %s\n") % k)
1667 self.ui.status(_("updating bookmark %s\n") % k)
1667 else:
1668 else:
1668 self.ui.warn(_('updating bookmark %s'
1669 self.ui.warn(_('updating bookmark %s'
1669 ' failed!\n') % k)
1670 ' failed!\n') % k)
1670
1671
1671 return ret
1672 return ret
1672
1673
1673 def changegroupinfo(self, nodes, source):
1674 def changegroupinfo(self, nodes, source):
1674 if self.ui.verbose or source == 'bundle':
1675 if self.ui.verbose or source == 'bundle':
1675 self.ui.status(_("%d changesets found\n") % len(nodes))
1676 self.ui.status(_("%d changesets found\n") % len(nodes))
1676 if self.ui.debugflag:
1677 if self.ui.debugflag:
1677 self.ui.debug("list of changesets:\n")
1678 self.ui.debug("list of changesets:\n")
1678 for node in nodes:
1679 for node in nodes:
1679 self.ui.debug("%s\n" % hex(node))
1680 self.ui.debug("%s\n" % hex(node))
1680
1681
1681 def changegroupsubset(self, bases, heads, source):
1682 def changegroupsubset(self, bases, heads, source):
1682 """Compute a changegroup consisting of all the nodes that are
1683 """Compute a changegroup consisting of all the nodes that are
1683 descendants of any of the bases and ancestors of any of the heads.
1684 descendants of any of the bases and ancestors of any of the heads.
1684 Return a chunkbuffer object whose read() method will return
1685 Return a chunkbuffer object whose read() method will return
1685 successive changegroup chunks.
1686 successive changegroup chunks.
1686
1687
1687 It is fairly complex as determining which filenodes and which
1688 It is fairly complex as determining which filenodes and which
1688 manifest nodes need to be included for the changeset to be complete
1689 manifest nodes need to be included for the changeset to be complete
1689 is non-trivial.
1690 is non-trivial.
1690
1691
1691 Another wrinkle is doing the reverse, figuring out which changeset in
1692 Another wrinkle is doing the reverse, figuring out which changeset in
1692 the changegroup a particular filenode or manifestnode belongs to.
1693 the changegroup a particular filenode or manifestnode belongs to.
1693 """
1694 """
1694 cl = self.changelog
1695 cl = self.changelog
1695 if not bases:
1696 if not bases:
1696 bases = [nullid]
1697 bases = [nullid]
1697 csets, bases, heads = cl.nodesbetween(bases, heads)
1698 csets, bases, heads = cl.nodesbetween(bases, heads)
1698 # We assume that all ancestors of bases are known
1699 # We assume that all ancestors of bases are known
1699 common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1700 common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1700 return self._changegroupsubset(common, csets, heads, source)
1701 return self._changegroupsubset(common, csets, heads, source)
1701
1702
1702 def getbundle(self, source, heads=None, common=None):
1703 def getbundle(self, source, heads=None, common=None):
1703 """Like changegroupsubset, but returns the set difference between the
1704 """Like changegroupsubset, but returns the set difference between the
1704 ancestors of heads and the ancestors common.
1705 ancestors of heads and the ancestors common.
1705
1706
1706 If heads is None, use the local heads. If common is None, use [nullid].
1707 If heads is None, use the local heads. If common is None, use [nullid].
1707
1708
1708 The nodes in common might not all be known locally due to the way the
1709 The nodes in common might not all be known locally due to the way the
1709 current discovery protocol works.
1710 current discovery protocol works.
1710 """
1711 """
1711 cl = self.changelog
1712 cl = self.changelog
1712 if common:
1713 if common:
1713 nm = cl.nodemap
1714 nm = cl.nodemap
1714 common = [n for n in common if n in nm]
1715 common = [n for n in common if n in nm]
1715 else:
1716 else:
1716 common = [nullid]
1717 common = [nullid]
1717 if not heads:
1718 if not heads:
1718 heads = cl.heads()
1719 heads = cl.heads()
1719 common, missing = cl.findcommonmissing(common, heads)
1720 common, missing = cl.findcommonmissing(common, heads)
1720 if not missing:
1721 if not missing:
1721 return None
1722 return None
1722 return self._changegroupsubset(common, missing, heads, source)
1723 return self._changegroupsubset(common, missing, heads, source)
1723
1724
1724 def _changegroupsubset(self, commonrevs, csets, heads, source):
1725 def _changegroupsubset(self, commonrevs, csets, heads, source):
1725
1726
1726 cl = self.changelog
1727 cl = self.changelog
1727 mf = self.manifest
1728 mf = self.manifest
1728 mfs = {} # needed manifests
1729 mfs = {} # needed manifests
1729 fnodes = {} # needed file nodes
1730 fnodes = {} # needed file nodes
1730 changedfiles = set()
1731 changedfiles = set()
1731 fstate = ['', {}]
1732 fstate = ['', {}]
1732 count = [0]
1733 count = [0]
1733
1734
1734 # can we go through the fast path ?
1735 # can we go through the fast path ?
1735 heads.sort()
1736 heads.sort()
1736 if heads == sorted(self.heads()):
1737 if heads == sorted(self.heads()):
1737 return self._changegroup(csets, source)
1738 return self._changegroup(csets, source)
1738
1739
1739 # slow path
1740 # slow path
1740 self.hook('preoutgoing', throw=True, source=source)
1741 self.hook('preoutgoing', throw=True, source=source)
1741 self.changegroupinfo(csets, source)
1742 self.changegroupinfo(csets, source)
1742
1743
1743 # filter any nodes that claim to be part of the known set
1744 # filter any nodes that claim to be part of the known set
1744 def prune(revlog, missing):
1745 def prune(revlog, missing):
1745 return [n for n in missing
1746 return [n for n in missing
1746 if revlog.linkrev(revlog.rev(n)) not in commonrevs]
1747 if revlog.linkrev(revlog.rev(n)) not in commonrevs]
1747
1748
1748 def lookup(revlog, x):
1749 def lookup(revlog, x):
1749 if revlog == cl:
1750 if revlog == cl:
1750 c = cl.read(x)
1751 c = cl.read(x)
1751 changedfiles.update(c[3])
1752 changedfiles.update(c[3])
1752 mfs.setdefault(c[0], x)
1753 mfs.setdefault(c[0], x)
1753 count[0] += 1
1754 count[0] += 1
1754 self.ui.progress(_('bundling'), count[0],
1755 self.ui.progress(_('bundling'), count[0],
1755 unit=_('changesets'), total=len(csets))
1756 unit=_('changesets'), total=len(csets))
1756 return x
1757 return x
1757 elif revlog == mf:
1758 elif revlog == mf:
1758 clnode = mfs[x]
1759 clnode = mfs[x]
1759 mdata = mf.readfast(x)
1760 mdata = mf.readfast(x)
1760 for f in changedfiles:
1761 for f in changedfiles:
1761 if f in mdata:
1762 if f in mdata:
1762 fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
1763 fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
1763 count[0] += 1
1764 count[0] += 1
1764 self.ui.progress(_('bundling'), count[0],
1765 self.ui.progress(_('bundling'), count[0],
1765 unit=_('manifests'), total=len(mfs))
1766 unit=_('manifests'), total=len(mfs))
1766 return mfs[x]
1767 return mfs[x]
1767 else:
1768 else:
1768 self.ui.progress(
1769 self.ui.progress(
1769 _('bundling'), count[0], item=fstate[0],
1770 _('bundling'), count[0], item=fstate[0],
1770 unit=_('files'), total=len(changedfiles))
1771 unit=_('files'), total=len(changedfiles))
1771 return fstate[1][x]
1772 return fstate[1][x]
1772
1773
1773 bundler = changegroup.bundle10(lookup)
1774 bundler = changegroup.bundle10(lookup)
1774 reorder = self.ui.config('bundle', 'reorder', 'auto')
1775 reorder = self.ui.config('bundle', 'reorder', 'auto')
1775 if reorder == 'auto':
1776 if reorder == 'auto':
1776 reorder = None
1777 reorder = None
1777 else:
1778 else:
1778 reorder = util.parsebool(reorder)
1779 reorder = util.parsebool(reorder)
1779
1780
1780 def gengroup():
1781 def gengroup():
1781 # Create a changenode group generator that will call our functions
1782 # Create a changenode group generator that will call our functions
1782 # back to lookup the owning changenode and collect information.
1783 # back to lookup the owning changenode and collect information.
1783 for chunk in cl.group(csets, bundler, reorder=reorder):
1784 for chunk in cl.group(csets, bundler, reorder=reorder):
1784 yield chunk
1785 yield chunk
1785 self.ui.progress(_('bundling'), None)
1786 self.ui.progress(_('bundling'), None)
1786
1787
1787 # Create a generator for the manifestnodes that calls our lookup
1788 # Create a generator for the manifestnodes that calls our lookup
1788 # and data collection functions back.
1789 # and data collection functions back.
1789 count[0] = 0
1790 count[0] = 0
1790 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
1791 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
1791 yield chunk
1792 yield chunk
1792 self.ui.progress(_('bundling'), None)
1793 self.ui.progress(_('bundling'), None)
1793
1794
1794 mfs.clear()
1795 mfs.clear()
1795
1796
1796 # Go through all our files in order sorted by name.
1797 # Go through all our files in order sorted by name.
1797 count[0] = 0
1798 count[0] = 0
1798 for fname in sorted(changedfiles):
1799 for fname in sorted(changedfiles):
1799 filerevlog = self.file(fname)
1800 filerevlog = self.file(fname)
1800 if not len(filerevlog):
1801 if not len(filerevlog):
1801 raise util.Abort(_("empty or missing revlog for %s") % fname)
1802 raise util.Abort(_("empty or missing revlog for %s") % fname)
1802 fstate[0] = fname
1803 fstate[0] = fname
1803 fstate[1] = fnodes.pop(fname, {})
1804 fstate[1] = fnodes.pop(fname, {})
1804
1805
1805 nodelist = prune(filerevlog, fstate[1])
1806 nodelist = prune(filerevlog, fstate[1])
1806 if nodelist:
1807 if nodelist:
1807 count[0] += 1
1808 count[0] += 1
1808 yield bundler.fileheader(fname)
1809 yield bundler.fileheader(fname)
1809 for chunk in filerevlog.group(nodelist, bundler, reorder):
1810 for chunk in filerevlog.group(nodelist, bundler, reorder):
1810 yield chunk
1811 yield chunk
1811
1812
1812 # Signal that no more groups are left.
1813 # Signal that no more groups are left.
1813 yield bundler.close()
1814 yield bundler.close()
1814 self.ui.progress(_('bundling'), None)
1815 self.ui.progress(_('bundling'), None)
1815
1816
1816 if csets:
1817 if csets:
1817 self.hook('outgoing', node=hex(csets[0]), source=source)
1818 self.hook('outgoing', node=hex(csets[0]), source=source)
1818
1819
1819 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1820 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1820
1821
1821 def changegroup(self, basenodes, source):
1822 def changegroup(self, basenodes, source):
1822 # to avoid a race we use changegroupsubset() (issue1320)
1823 # to avoid a race we use changegroupsubset() (issue1320)
1823 return self.changegroupsubset(basenodes, self.heads(), source)
1824 return self.changegroupsubset(basenodes, self.heads(), source)
1824
1825
1825 def _changegroup(self, nodes, source):
1826 def _changegroup(self, nodes, source):
1826 """Compute the changegroup of all nodes that we have that a recipient
1827 """Compute the changegroup of all nodes that we have that a recipient
1827 doesn't. Return a chunkbuffer object whose read() method will return
1828 doesn't. Return a chunkbuffer object whose read() method will return
1828 successive changegroup chunks.
1829 successive changegroup chunks.
1829
1830
1830 This is much easier than the previous function as we can assume that
1831 This is much easier than the previous function as we can assume that
1831 the recipient has any changenode we aren't sending them.
1832 the recipient has any changenode we aren't sending them.
1832
1833
1833 nodes is the set of nodes to send"""
1834 nodes is the set of nodes to send"""
1834
1835
1835 cl = self.changelog
1836 cl = self.changelog
1836 mf = self.manifest
1837 mf = self.manifest
1837 mfs = {}
1838 mfs = {}
1838 changedfiles = set()
1839 changedfiles = set()
1839 fstate = ['']
1840 fstate = ['']
1840 count = [0]
1841 count = [0]
1841
1842
1842 self.hook('preoutgoing', throw=True, source=source)
1843 self.hook('preoutgoing', throw=True, source=source)
1843 self.changegroupinfo(nodes, source)
1844 self.changegroupinfo(nodes, source)
1844
1845
1845 revset = set([cl.rev(n) for n in nodes])
1846 revset = set([cl.rev(n) for n in nodes])
1846
1847
1847 def gennodelst(log):
1848 def gennodelst(log):
1848 return [log.node(r) for r in log if log.linkrev(r) in revset]
1849 return [log.node(r) for r in log if log.linkrev(r) in revset]
1849
1850
1850 def lookup(revlog, x):
1851 def lookup(revlog, x):
1851 if revlog == cl:
1852 if revlog == cl:
1852 c = cl.read(x)
1853 c = cl.read(x)
1853 changedfiles.update(c[3])
1854 changedfiles.update(c[3])
1854 mfs.setdefault(c[0], x)
1855 mfs.setdefault(c[0], x)
1855 count[0] += 1
1856 count[0] += 1
1856 self.ui.progress(_('bundling'), count[0],
1857 self.ui.progress(_('bundling'), count[0],
1857 unit=_('changesets'), total=len(nodes))
1858 unit=_('changesets'), total=len(nodes))
1858 return x
1859 return x
1859 elif revlog == mf:
1860 elif revlog == mf:
1860 count[0] += 1
1861 count[0] += 1
1861 self.ui.progress(_('bundling'), count[0],
1862 self.ui.progress(_('bundling'), count[0],
1862 unit=_('manifests'), total=len(mfs))
1863 unit=_('manifests'), total=len(mfs))
1863 return cl.node(revlog.linkrev(revlog.rev(x)))
1864 return cl.node(revlog.linkrev(revlog.rev(x)))
1864 else:
1865 else:
1865 self.ui.progress(
1866 self.ui.progress(
1866 _('bundling'), count[0], item=fstate[0],
1867 _('bundling'), count[0], item=fstate[0],
1867 total=len(changedfiles), unit=_('files'))
1868 total=len(changedfiles), unit=_('files'))
1868 return cl.node(revlog.linkrev(revlog.rev(x)))
1869 return cl.node(revlog.linkrev(revlog.rev(x)))
1869
1870
1870 bundler = changegroup.bundle10(lookup)
1871 bundler = changegroup.bundle10(lookup)
1871 reorder = self.ui.config('bundle', 'reorder', 'auto')
1872 reorder = self.ui.config('bundle', 'reorder', 'auto')
1872 if reorder == 'auto':
1873 if reorder == 'auto':
1873 reorder = None
1874 reorder = None
1874 else:
1875 else:
1875 reorder = util.parsebool(reorder)
1876 reorder = util.parsebool(reorder)
1876
1877
1877 def gengroup():
1878 def gengroup():
1878 '''yield a sequence of changegroup chunks (strings)'''
1879 '''yield a sequence of changegroup chunks (strings)'''
1879 # construct a list of all changed files
1880 # construct a list of all changed files
1880
1881
1881 for chunk in cl.group(nodes, bundler, reorder=reorder):
1882 for chunk in cl.group(nodes, bundler, reorder=reorder):
1882 yield chunk
1883 yield chunk
1883 self.ui.progress(_('bundling'), None)
1884 self.ui.progress(_('bundling'), None)
1884
1885
1885 count[0] = 0
1886 count[0] = 0
1886 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
1887 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
1887 yield chunk
1888 yield chunk
1888 self.ui.progress(_('bundling'), None)
1889 self.ui.progress(_('bundling'), None)
1889
1890
1890 count[0] = 0
1891 count[0] = 0
1891 for fname in sorted(changedfiles):
1892 for fname in sorted(changedfiles):
1892 filerevlog = self.file(fname)
1893 filerevlog = self.file(fname)
1893 if not len(filerevlog):
1894 if not len(filerevlog):
1894 raise util.Abort(_("empty or missing revlog for %s") % fname)
1895 raise util.Abort(_("empty or missing revlog for %s") % fname)
1895 fstate[0] = fname
1896 fstate[0] = fname
1896 nodelist = gennodelst(filerevlog)
1897 nodelist = gennodelst(filerevlog)
1897 if nodelist:
1898 if nodelist:
1898 count[0] += 1
1899 count[0] += 1
1899 yield bundler.fileheader(fname)
1900 yield bundler.fileheader(fname)
1900 for chunk in filerevlog.group(nodelist, bundler, reorder):
1901 for chunk in filerevlog.group(nodelist, bundler, reorder):
1901 yield chunk
1902 yield chunk
1902 yield bundler.close()
1903 yield bundler.close()
1903 self.ui.progress(_('bundling'), None)
1904 self.ui.progress(_('bundling'), None)
1904
1905
1905 if nodes:
1906 if nodes:
1906 self.hook('outgoing', node=hex(nodes[0]), source=source)
1907 self.hook('outgoing', node=hex(nodes[0]), source=source)
1907
1908
1908 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1909 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1909
1910
1910 def addchangegroup(self, source, srctype, url, emptyok=False):
1911 def addchangegroup(self, source, srctype, url, emptyok=False):
1911 """Add the changegroup returned by source.read() to this repo.
1912 """Add the changegroup returned by source.read() to this repo.
1912 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1913 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1913 the URL of the repo where this changegroup is coming from.
1914 the URL of the repo where this changegroup is coming from.
1914
1915
1915 Return an integer summarizing the change to this repo:
1916 Return an integer summarizing the change to this repo:
1916 - nothing changed or no source: 0
1917 - nothing changed or no source: 0
1917 - more heads than before: 1+added heads (2..n)
1918 - more heads than before: 1+added heads (2..n)
1918 - fewer heads than before: -1-removed heads (-2..-n)
1919 - fewer heads than before: -1-removed heads (-2..-n)
1919 - number of heads stays the same: 1
1920 - number of heads stays the same: 1
1920 """
1921 """
1921 def csmap(x):
1922 def csmap(x):
1922 self.ui.debug("add changeset %s\n" % short(x))
1923 self.ui.debug("add changeset %s\n" % short(x))
1923 return len(cl)
1924 return len(cl)
1924
1925
1925 def revmap(x):
1926 def revmap(x):
1926 return cl.rev(x)
1927 return cl.rev(x)
1927
1928
1928 if not source:
1929 if not source:
1929 return 0
1930 return 0
1930
1931
1931 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1932 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1932
1933
1933 changesets = files = revisions = 0
1934 changesets = files = revisions = 0
1934 efiles = set()
1935 efiles = set()
1935
1936
1936 # write changelog data to temp files so concurrent readers will not see
1937 # write changelog data to temp files so concurrent readers will not see
1937 # inconsistent view
1938 # inconsistent view
1938 cl = self.changelog
1939 cl = self.changelog
1939 cl.delayupdate()
1940 cl.delayupdate()
1940 oldheads = cl.heads()
1941 oldheads = cl.heads()
1941
1942
1942 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
1943 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
1943 try:
1944 try:
1944 trp = weakref.proxy(tr)
1945 trp = weakref.proxy(tr)
1945 # pull off the changeset group
1946 # pull off the changeset group
1946 self.ui.status(_("adding changesets\n"))
1947 self.ui.status(_("adding changesets\n"))
1947 clstart = len(cl)
1948 clstart = len(cl)
1948 class prog(object):
1949 class prog(object):
1949 step = _('changesets')
1950 step = _('changesets')
1950 count = 1
1951 count = 1
1951 ui = self.ui
1952 ui = self.ui
1952 total = None
1953 total = None
1953 def __call__(self):
1954 def __call__(self):
1954 self.ui.progress(self.step, self.count, unit=_('chunks'),
1955 self.ui.progress(self.step, self.count, unit=_('chunks'),
1955 total=self.total)
1956 total=self.total)
1956 self.count += 1
1957 self.count += 1
1957 pr = prog()
1958 pr = prog()
1958 source.callback = pr
1959 source.callback = pr
1959
1960
1960 source.changelogheader()
1961 source.changelogheader()
1961 if (cl.addgroup(source, csmap, trp) is None
1962 if (cl.addgroup(source, csmap, trp) is None
1962 and not emptyok):
1963 and not emptyok):
1963 raise util.Abort(_("received changelog group is empty"))
1964 raise util.Abort(_("received changelog group is empty"))
1964 clend = len(cl)
1965 clend = len(cl)
1965 changesets = clend - clstart
1966 changesets = clend - clstart
1966 for c in xrange(clstart, clend):
1967 for c in xrange(clstart, clend):
1967 efiles.update(self[c].files())
1968 efiles.update(self[c].files())
1968 efiles = len(efiles)
1969 efiles = len(efiles)
1969 self.ui.progress(_('changesets'), None)
1970 self.ui.progress(_('changesets'), None)
1970
1971
1971 # pull off the manifest group
1972 # pull off the manifest group
1972 self.ui.status(_("adding manifests\n"))
1973 self.ui.status(_("adding manifests\n"))
1973 pr.step = _('manifests')
1974 pr.step = _('manifests')
1974 pr.count = 1
1975 pr.count = 1
1975 pr.total = changesets # manifests <= changesets
1976 pr.total = changesets # manifests <= changesets
1976 # no need to check for empty manifest group here:
1977 # no need to check for empty manifest group here:
1977 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1978 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1978 # no new manifest will be created and the manifest group will
1979 # no new manifest will be created and the manifest group will
1979 # be empty during the pull
1980 # be empty during the pull
1980 source.manifestheader()
1981 source.manifestheader()
1981 self.manifest.addgroup(source, revmap, trp)
1982 self.manifest.addgroup(source, revmap, trp)
1982 self.ui.progress(_('manifests'), None)
1983 self.ui.progress(_('manifests'), None)
1983
1984
1984 needfiles = {}
1985 needfiles = {}
1985 if self.ui.configbool('server', 'validate', default=False):
1986 if self.ui.configbool('server', 'validate', default=False):
1986 # validate incoming csets have their manifests
1987 # validate incoming csets have their manifests
1987 for cset in xrange(clstart, clend):
1988 for cset in xrange(clstart, clend):
1988 mfest = self.changelog.read(self.changelog.node(cset))[0]
1989 mfest = self.changelog.read(self.changelog.node(cset))[0]
1989 mfest = self.manifest.readdelta(mfest)
1990 mfest = self.manifest.readdelta(mfest)
1990 # store file nodes we must see
1991 # store file nodes we must see
1991 for f, n in mfest.iteritems():
1992 for f, n in mfest.iteritems():
1992 needfiles.setdefault(f, set()).add(n)
1993 needfiles.setdefault(f, set()).add(n)
1993
1994
1994 # process the files
1995 # process the files
1995 self.ui.status(_("adding file changes\n"))
1996 self.ui.status(_("adding file changes\n"))
1996 pr.step = _('files')
1997 pr.step = _('files')
1997 pr.count = 1
1998 pr.count = 1
1998 pr.total = efiles
1999 pr.total = efiles
1999 source.callback = None
2000 source.callback = None
2000
2001
2001 while True:
2002 while True:
2002 chunkdata = source.filelogheader()
2003 chunkdata = source.filelogheader()
2003 if not chunkdata:
2004 if not chunkdata:
2004 break
2005 break
2005 f = chunkdata["filename"]
2006 f = chunkdata["filename"]
2006 self.ui.debug("adding %s revisions\n" % f)
2007 self.ui.debug("adding %s revisions\n" % f)
2007 pr()
2008 pr()
2008 fl = self.file(f)
2009 fl = self.file(f)
2009 o = len(fl)
2010 o = len(fl)
2010 if fl.addgroup(source, revmap, trp) is None:
2011 if fl.addgroup(source, revmap, trp) is None:
2011 raise util.Abort(_("received file revlog group is empty"))
2012 raise util.Abort(_("received file revlog group is empty"))
2012 revisions += len(fl) - o
2013 revisions += len(fl) - o
2013 files += 1
2014 files += 1
2014 if f in needfiles:
2015 if f in needfiles:
2015 needs = needfiles[f]
2016 needs = needfiles[f]
2016 for new in xrange(o, len(fl)):
2017 for new in xrange(o, len(fl)):
2017 n = fl.node(new)
2018 n = fl.node(new)
2018 if n in needs:
2019 if n in needs:
2019 needs.remove(n)
2020 needs.remove(n)
2020 if not needs:
2021 if not needs:
2021 del needfiles[f]
2022 del needfiles[f]
2022 self.ui.progress(_('files'), None)
2023 self.ui.progress(_('files'), None)
2023
2024
2024 for f, needs in needfiles.iteritems():
2025 for f, needs in needfiles.iteritems():
2025 fl = self.file(f)
2026 fl = self.file(f)
2026 for n in needs:
2027 for n in needs:
2027 try:
2028 try:
2028 fl.rev(n)
2029 fl.rev(n)
2029 except error.LookupError:
2030 except error.LookupError:
2030 raise util.Abort(
2031 raise util.Abort(
2031 _('missing file data for %s:%s - run hg verify') %
2032 _('missing file data for %s:%s - run hg verify') %
2032 (f, hex(n)))
2033 (f, hex(n)))
2033
2034
2034 dh = 0
2035 dh = 0
2035 if oldheads:
2036 if oldheads:
2036 heads = cl.heads()
2037 heads = cl.heads()
2037 dh = len(heads) - len(oldheads)
2038 dh = len(heads) - len(oldheads)
2038 for h in heads:
2039 for h in heads:
2039 if h not in oldheads and 'close' in self[h].extra():
2040 if h not in oldheads and 'close' in self[h].extra():
2040 dh -= 1
2041 dh -= 1
2041 htext = ""
2042 htext = ""
2042 if dh:
2043 if dh:
2043 htext = _(" (%+d heads)") % dh
2044 htext = _(" (%+d heads)") % dh
2044
2045
2045 self.ui.status(_("added %d changesets"
2046 self.ui.status(_("added %d changesets"
2046 " with %d changes to %d files%s\n")
2047 " with %d changes to %d files%s\n")
2047 % (changesets, revisions, files, htext))
2048 % (changesets, revisions, files, htext))
2048
2049
2049 if changesets > 0:
2050 if changesets > 0:
2050 p = lambda: cl.writepending() and self.root or ""
2051 p = lambda: cl.writepending() and self.root or ""
2051 self.hook('pretxnchangegroup', throw=True,
2052 self.hook('pretxnchangegroup', throw=True,
2052 node=hex(cl.node(clstart)), source=srctype,
2053 node=hex(cl.node(clstart)), source=srctype,
2053 url=url, pending=p)
2054 url=url, pending=p)
2054
2055
2055 added = [cl.node(r) for r in xrange(clstart, clend)]
2056 added = [cl.node(r) for r in xrange(clstart, clend)]
2056 publishing = self.ui.configbool('phases', 'publish', True)
2057 publishing = self.ui.configbool('phases', 'publish', True)
2057 if publishing and srctype == 'push':
2058 if publishing and srctype == 'push':
2058 # Old server can not push the boundary themself.
2059 # Old server can not push the boundary themself.
2059 # This clause ensure pushed changeset are alway marked as public
2060 # This clause ensure pushed changeset are alway marked as public
2060 phases.advanceboundary(self, 0, added)
2061 phases.advanceboundary(self, phases.public, added)
2061 elif srctype != 'strip': # strip should not touch boundary at all
2062 elif srctype != 'strip': # strip should not touch boundary at all
2062 phases.retractboundary(self, 1, added)
2063 phases.retractboundary(self, phases.draft, added)
2063
2064
2064 # make changelog see real files again
2065 # make changelog see real files again
2065 cl.finalize(trp)
2066 cl.finalize(trp)
2066
2067
2067 tr.close()
2068 tr.close()
2068
2069
2069 if changesets > 0:
2070 if changesets > 0:
2070 def runhooks():
2071 def runhooks():
2071 # forcefully update the on-disk branch cache
2072 # forcefully update the on-disk branch cache
2072 self.ui.debug("updating the branch cache\n")
2073 self.ui.debug("updating the branch cache\n")
2073 self.updatebranchcache()
2074 self.updatebranchcache()
2074 self.hook("changegroup", node=hex(cl.node(clstart)),
2075 self.hook("changegroup", node=hex(cl.node(clstart)),
2075 source=srctype, url=url)
2076 source=srctype, url=url)
2076
2077
2077 for n in added:
2078 for n in added:
2078 self.hook("incoming", node=hex(n), source=srctype,
2079 self.hook("incoming", node=hex(n), source=srctype,
2079 url=url)
2080 url=url)
2080 self._afterlock(runhooks)
2081 self._afterlock(runhooks)
2081
2082
2082 finally:
2083 finally:
2083 tr.release()
2084 tr.release()
2084 # never return 0 here:
2085 # never return 0 here:
2085 if dh < 0:
2086 if dh < 0:
2086 return dh - 1
2087 return dh - 1
2087 else:
2088 else:
2088 return dh + 1
2089 return dh + 1
2089
2090
2090 def stream_in(self, remote, requirements):
2091 def stream_in(self, remote, requirements):
2091 lock = self.lock()
2092 lock = self.lock()
2092 try:
2093 try:
2093 fp = remote.stream_out()
2094 fp = remote.stream_out()
2094 l = fp.readline()
2095 l = fp.readline()
2095 try:
2096 try:
2096 resp = int(l)
2097 resp = int(l)
2097 except ValueError:
2098 except ValueError:
2098 raise error.ResponseError(
2099 raise error.ResponseError(
2099 _('Unexpected response from remote server:'), l)
2100 _('Unexpected response from remote server:'), l)
2100 if resp == 1:
2101 if resp == 1:
2101 raise util.Abort(_('operation forbidden by server'))
2102 raise util.Abort(_('operation forbidden by server'))
2102 elif resp == 2:
2103 elif resp == 2:
2103 raise util.Abort(_('locking the remote repository failed'))
2104 raise util.Abort(_('locking the remote repository failed'))
2104 elif resp != 0:
2105 elif resp != 0:
2105 raise util.Abort(_('the server sent an unknown error code'))
2106 raise util.Abort(_('the server sent an unknown error code'))
2106 self.ui.status(_('streaming all changes\n'))
2107 self.ui.status(_('streaming all changes\n'))
2107 l = fp.readline()
2108 l = fp.readline()
2108 try:
2109 try:
2109 total_files, total_bytes = map(int, l.split(' ', 1))
2110 total_files, total_bytes = map(int, l.split(' ', 1))
2110 except (ValueError, TypeError):
2111 except (ValueError, TypeError):
2111 raise error.ResponseError(
2112 raise error.ResponseError(
2112 _('Unexpected response from remote server:'), l)
2113 _('Unexpected response from remote server:'), l)
2113 self.ui.status(_('%d files to transfer, %s of data\n') %
2114 self.ui.status(_('%d files to transfer, %s of data\n') %
2114 (total_files, util.bytecount(total_bytes)))
2115 (total_files, util.bytecount(total_bytes)))
2115 start = time.time()
2116 start = time.time()
2116 for i in xrange(total_files):
2117 for i in xrange(total_files):
2117 # XXX doesn't support '\n' or '\r' in filenames
2118 # XXX doesn't support '\n' or '\r' in filenames
2118 l = fp.readline()
2119 l = fp.readline()
2119 try:
2120 try:
2120 name, size = l.split('\0', 1)
2121 name, size = l.split('\0', 1)
2121 size = int(size)
2122 size = int(size)
2122 except (ValueError, TypeError):
2123 except (ValueError, TypeError):
2123 raise error.ResponseError(
2124 raise error.ResponseError(
2124 _('Unexpected response from remote server:'), l)
2125 _('Unexpected response from remote server:'), l)
2125 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2126 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2126 # for backwards compat, name was partially encoded
2127 # for backwards compat, name was partially encoded
2127 ofp = self.sopener(store.decodedir(name), 'w')
2128 ofp = self.sopener(store.decodedir(name), 'w')
2128 for chunk in util.filechunkiter(fp, limit=size):
2129 for chunk in util.filechunkiter(fp, limit=size):
2129 ofp.write(chunk)
2130 ofp.write(chunk)
2130 ofp.close()
2131 ofp.close()
2131 elapsed = time.time() - start
2132 elapsed = time.time() - start
2132 if elapsed <= 0:
2133 if elapsed <= 0:
2133 elapsed = 0.001
2134 elapsed = 0.001
2134 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2135 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2135 (util.bytecount(total_bytes), elapsed,
2136 (util.bytecount(total_bytes), elapsed,
2136 util.bytecount(total_bytes / elapsed)))
2137 util.bytecount(total_bytes / elapsed)))
2137
2138
2138 # new requirements = old non-format requirements + new format-related
2139 # new requirements = old non-format requirements + new format-related
2139 # requirements from the streamed-in repository
2140 # requirements from the streamed-in repository
2140 requirements.update(set(self.requirements) - self.supportedformats)
2141 requirements.update(set(self.requirements) - self.supportedformats)
2141 self._applyrequirements(requirements)
2142 self._applyrequirements(requirements)
2142 self._writerequirements()
2143 self._writerequirements()
2143
2144
2144 self.invalidate()
2145 self.invalidate()
2145 return len(self.heads()) + 1
2146 return len(self.heads()) + 1
2146 finally:
2147 finally:
2147 lock.release()
2148 lock.release()
2148
2149
2149 def clone(self, remote, heads=[], stream=False):
2150 def clone(self, remote, heads=[], stream=False):
2150 '''clone remote repository.
2151 '''clone remote repository.
2151
2152
2152 keyword arguments:
2153 keyword arguments:
2153 heads: list of revs to clone (forces use of pull)
2154 heads: list of revs to clone (forces use of pull)
2154 stream: use streaming clone if possible'''
2155 stream: use streaming clone if possible'''
2155
2156
2156 # now, all clients that can request uncompressed clones can
2157 # now, all clients that can request uncompressed clones can
2157 # read repo formats supported by all servers that can serve
2158 # read repo formats supported by all servers that can serve
2158 # them.
2159 # them.
2159
2160
2160 # if revlog format changes, client will have to check version
2161 # if revlog format changes, client will have to check version
2161 # and format flags on "stream" capability, and use
2162 # and format flags on "stream" capability, and use
2162 # uncompressed only if compatible.
2163 # uncompressed only if compatible.
2163
2164
2164 if stream and not heads:
2165 if stream and not heads:
2165 # 'stream' means remote revlog format is revlogv1 only
2166 # 'stream' means remote revlog format is revlogv1 only
2166 if remote.capable('stream'):
2167 if remote.capable('stream'):
2167 return self.stream_in(remote, set(('revlogv1',)))
2168 return self.stream_in(remote, set(('revlogv1',)))
2168 # otherwise, 'streamreqs' contains the remote revlog format
2169 # otherwise, 'streamreqs' contains the remote revlog format
2169 streamreqs = remote.capable('streamreqs')
2170 streamreqs = remote.capable('streamreqs')
2170 if streamreqs:
2171 if streamreqs:
2171 streamreqs = set(streamreqs.split(','))
2172 streamreqs = set(streamreqs.split(','))
2172 # if we support it, stream in and adjust our requirements
2173 # if we support it, stream in and adjust our requirements
2173 if not streamreqs - self.supportedformats:
2174 if not streamreqs - self.supportedformats:
2174 return self.stream_in(remote, streamreqs)
2175 return self.stream_in(remote, streamreqs)
2175 return self.pull(remote, heads)
2176 return self.pull(remote, heads)
2176
2177
2177 def pushkey(self, namespace, key, old, new):
2178 def pushkey(self, namespace, key, old, new):
2178 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2179 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2179 old=old, new=new)
2180 old=old, new=new)
2180 ret = pushkey.push(self, namespace, key, old, new)
2181 ret = pushkey.push(self, namespace, key, old, new)
2181 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2182 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2182 ret=ret)
2183 ret=ret)
2183 return ret
2184 return ret
2184
2185
2185 def listkeys(self, namespace):
2186 def listkeys(self, namespace):
2186 self.hook('prelistkeys', throw=True, namespace=namespace)
2187 self.hook('prelistkeys', throw=True, namespace=namespace)
2187 values = pushkey.list(self, namespace)
2188 values = pushkey.list(self, namespace)
2188 self.hook('listkeys', namespace=namespace, values=values)
2189 self.hook('listkeys', namespace=namespace, values=values)
2189 return values
2190 return values
2190
2191
2191 def debugwireargs(self, one, two, three=None, four=None, five=None):
2192 def debugwireargs(self, one, two, three=None, four=None, five=None):
2192 '''used to test argument passing over the wire'''
2193 '''used to test argument passing over the wire'''
2193 return "%s %s %s %s %s" % (one, two, three, four, five)
2194 return "%s %s %s %s %s" % (one, two, three, four, five)
2194
2195
2195 def savecommitmessage(self, text):
2196 def savecommitmessage(self, text):
2196 fp = self.opener('last-message.txt', 'wb')
2197 fp = self.opener('last-message.txt', 'wb')
2197 try:
2198 try:
2198 fp.write(text)
2199 fp.write(text)
2199 finally:
2200 finally:
2200 fp.close()
2201 fp.close()
2201 return self.pathto(fp.name[len(self.root)+1:])
2202 return self.pathto(fp.name[len(self.root)+1:])
2202
2203
2203 # used to avoid circular references so destructors work
2204 # used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that renames every (src, dest) pair in *files*.

    The pairs are copied into a fresh list up front so the returned
    closure does not keep the caller's sequence (or anything it
    references) alive — this avoids circular references so destructors
    work.
    """
    pending = [tuple(pair) for pair in files]
    def renameall():
        for src, dest in pending:
            util.rename(src, dest)
    return renameall
2210
2211
def undoname(fn):
    """Map a journal file path to the corresponding undo file path.

    Only the first 'journal' occurrence in the basename is replaced, so
    directory components are never touched.
    """
    dirpath, basename = os.path.split(fn)
    assert basename.startswith('journal')
    return os.path.join(dirpath, basename.replace('journal', 'undo', 1))
2215
2216
def instance(ui, path, create):
    """Repository factory used by mercurial.hg: open/create a local repo.

    *path* may be a file:// URL; it is normalized to a filesystem path.
    """
    return localrepository(ui, util.urllocalpath(path), create)
2218
2219
def islocal(path):
    """Local repositories are, by definition, always local."""
    return True
@@ -1,282 +1,282
1 """ Mercurial phases support code
1 """ Mercurial phases support code
2
2
3 ---
3 ---
4
4
5 Copyright 2011 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
5 Copyright 2011 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
6 Logilab SA <contact@logilab.fr>
6 Logilab SA <contact@logilab.fr>
7 Augie Fackler <durin42@gmail.com>
7 Augie Fackler <durin42@gmail.com>
8
8
9 This software may be used and distributed according to the terms of the
9 This software may be used and distributed according to the terms of the
10 GNU General Public License version 2 or any later version.
10 GNU General Public License version 2 or any later version.
11
11
12 ---
12 ---
13
13
14 This module implements most phase logic in mercurial.
14 This module implements most phase logic in mercurial.
15
15
16
16
17 Basic Concept
17 Basic Concept
18 =============
18 =============
19
19
20 A 'changeset phases' is an indicator that tells us how a changeset is
20 A 'changeset phases' is an indicator that tells us how a changeset is
21 manipulated and communicated. The details of each phase is described below,
21 manipulated and communicated. The details of each phase is described below,
22 here we describe the properties they have in common.
22 here we describe the properties they have in common.
23
23
24 Like bookmarks, phases are not stored in history and thus are not permanent and
24 Like bookmarks, phases are not stored in history and thus are not permanent and
25 leave no audit trail.
25 leave no audit trail.
26
26
27 First, no changeset can be in two phases at once. Phases are ordered, so they
27 First, no changeset can be in two phases at once. Phases are ordered, so they
28 can be considered from lowest to highest. The default, lowest phase is 'public'
28 can be considered from lowest to highest. The default, lowest phase is 'public'
29 - this is the normal phase of existing changesets. A child changeset can not be
29 - this is the normal phase of existing changesets. A child changeset can not be
30 in a lower phase than its parents.
30 in a lower phase than its parents.
31
31
32 These phases share a hierarchy of traits:
32 These phases share a hierarchy of traits:
33
33
34 immutable shared
34 immutable shared
35 public: X X
35 public: X X
36 draft: X
36 draft: X
37 secret:
37 secret:
38
38
39 local commits are draft by default
39 local commits are draft by default
40
40
41 Phase movement and exchange
41 Phase movement and exchange
42 ============================
42 ============================
43
43
44 Phase data are exchanged by pushkey on pull and push. Some server have a
44 Phase data are exchanged by pushkey on pull and push. Some server have a
45 publish option set, we call them publishing server. Pushing to such server make
45 publish option set, we call them publishing server. Pushing to such server make
46 draft changeset publish.
46 draft changeset publish.
47
47
48 A small list of fact/rules define the exchange of phase:
48 A small list of fact/rules define the exchange of phase:
49
49
50 * old client never changes server states
50 * old client never changes server states
51 * pull never changes server states
51 * pull never changes server states
52 * publish and old server csets are seen as public by client
52 * publish and old server csets are seen as public by client
53
53
54 * Any secret changeset seens in another repository is lowered to at least draft
54 * Any secret changeset seens in another repository is lowered to at least draft
55
55
56
56
57 Here is the final table summing up the 49 possible usecase of phase exchange:
57 Here is the final table summing up the 49 possible usecase of phase exchange:
58
58
59 server
59 server
60 old publish non-publish
60 old publish non-publish
61 N X N D P N D P
61 N X N D P N D P
62 old client
62 old client
63 pull
63 pull
64 N - X/X - X/D X/P - X/D X/P
64 N - X/X - X/D X/P - X/D X/P
65 X - X/X - X/D X/P - X/D X/P
65 X - X/X - X/D X/P - X/D X/P
66 push
66 push
67 X X/X X/X X/P X/P X/P X/D X/D X/P
67 X X/X X/X X/P X/P X/P X/D X/D X/P
68 new client
68 new client
69 pull
69 pull
70 N - P/X - P/D P/P - D/D P/P
70 N - P/X - P/D P/P - D/D P/P
71 D - P/X - P/D P/P - D/D P/P
71 D - P/X - P/D P/P - D/D P/P
72 P - P/X - P/D P/P - P/D P/P
72 P - P/X - P/D P/P - P/D P/P
73 push
73 push
74 D P/X P/X P/P P/P P/P D/D D/D P/P
74 D P/X P/X P/P P/P P/P D/D D/D P/P
75 P P/X P/X P/P P/P P/P P/P P/P P/P
75 P P/X P/X P/P P/P P/P P/P P/P P/P
76
76
77 Legend:
77 Legend:
78
78
79 A/B = final state on client / state on server
79 A/B = final state on client / state on server
80
80
81 * N = new/not present,
81 * N = new/not present,
82 * P = public,
82 * P = public,
83 * D = draft,
83 * D = draft,
84 * X = not tracked (ie: the old client or server has no internal way of
84 * X = not tracked (ie: the old client or server has no internal way of
85 recording the phase.)
85 recording the phase.)
86
86
87 passive = only pushes
87 passive = only pushes
88
88
89
89
90 A cell here can be read like this:
90 A cell here can be read like this:
91
91
92 "When a new client pushes a draft changeset (D) to a publishing server
92 "When a new client pushes a draft changeset (D) to a publishing server
93 where it's not present (N), it's marked public on both sides (P/P)."
93 where it's not present (N), it's marked public on both sides (P/P)."
94
94
95 Note: old client behave as publish server with Draft only content
95 Note: old client behave as publish server with Draft only content
96 - other people see it as public
96 - other people see it as public
97 - content is pushed as draft
97 - content is pushed as draft
98
98
99 """
99 """
100
100
101 import errno
101 import errno
102 from node import nullid, bin, hex, short
102 from node import nullid, bin, hex, short
103 from i18n import _
103 from i18n import _
104
104
# Phase values, ordered lowest to highest: public (0) < draft (1) < secret (2).
# Use these named constants instead of raw integers throughout the code.
allphases = public, draft, secret = range(3)
# Phases whose roots are tracked on disk; public is the implicit default.
trackedphases = allphases[1:]
107
107
108 def readroots(repo):
108 def readroots(repo):
109 """Read phase roots from disk"""
109 """Read phase roots from disk"""
110 roots = [set() for i in allphases]
110 roots = [set() for i in allphases]
111 roots[0].add(nullid)
111 roots[0].add(nullid)
112 try:
112 try:
113 f = repo.sopener('phaseroots')
113 f = repo.sopener('phaseroots')
114 try:
114 try:
115 for line in f:
115 for line in f:
116 phase, nh = line.strip().split()
116 phase, nh = line.strip().split()
117 roots[int(phase)].add(bin(nh))
117 roots[int(phase)].add(bin(nh))
118 finally:
118 finally:
119 f.close()
119 f.close()
120 except IOError, inst:
120 except IOError, inst:
121 if inst.errno != errno.ENOENT:
121 if inst.errno != errno.ENOENT:
122 raise
122 raise
123 return roots
123 return roots
124
124
def writeroots(repo):
    """Write phase roots to disk (the 'phaseroots' store file).

    Serializes repo._phaseroots as "<phase> <hex node>" lines, atomically,
    and clears the in-memory dirty flag once everything is written.
    """
    fh = repo.sopener('phaseroots', 'w', atomictemp=True)
    try:
        for phase, roots in enumerate(repo._phaseroots):
            for node in roots:
                fh.write('%i %s\n' % (phase, hex(node)))
        repo._dirtyphases = False
    finally:
        fh.close()
135
135
def filterunknown(repo, phaseroots=None):
    """remove unknown nodes from the phase boundary

    no data is lost as unknown node only old data for their descentants
    """
    if phaseroots is None:
        phaseroots = repo._phaseroots
    for phase, nodes in enumerate(phaseroots):
        # roots referencing changesets the repo no longer has (e.g. stripped)
        missing = [node for node in nodes if node not in repo]
        if missing:
            for mnode in missing:
                msg = _('Removing unknown node %(n)s from %(p)i-phase boundary')
                # interpolate here: ui.debug does not format its arguments,
                # so passing the mapping separately would leave the
                # %(n)s/%(p)i placeholders unexpanded and print the raw dict
                repo.ui.debug(msg % {'n': short(mnode), 'p': phase})
            nodes.symmetric_difference_update(missing)
            repo._dirtyphases = True
151
151
def advanceboundary(repo, targetphase, nodes):
    """Add nodes to a phase changing other nodes phases if necessary.

    This function moves the boundary *forward*: every node ends up in
    the target phase or stays in a *lower* one. The boundary is
    simplified so it contains phase roots only.
    """
    delroots = []  # roots removed from higher phases along the way
    for phase in xrange(targetphase + 1, len(allphases)):
        # filter nodes that are not in a compatible phase already
        # XXX rev phase cache might have been invalidated by a previous loop
        # XXX we need to be smarter here
        nodes = [n for n in nodes if repo[n].phase() >= phase]
        if not nodes:
            break  # no roots to move anymore
        roots = repo._phaseroots[phase]
        oldroots = roots.copy()
        # recompute the minimal root set for this phase without *nodes*
        newctxs = list(repo.set('roots((%ln::) - (%ln::%ln))',
                                oldroots, oldroots, nodes))
        roots.clear()
        roots.update(ctx.node() for ctx in newctxs)
        if oldroots != roots:
            # invalidate the cached per-rev phases (we could be smarter here)
            if '_phaserev' in vars(repo):
                del repo._phaserev
            repo._dirtyphases = True
            # roots dropped here may need to be re-declared at a lower phase
            delroots.extend(oldroots - roots)
    # declare the deleted roots in the target phase (public needs no roots)
    if targetphase != public:
        retractboundary(repo, targetphase, delroots)
182
182
183
183
def retractboundary(repo, targetphase, nodes):
    """Set nodes back to a phase changing other nodes phases if necessary.

    This function moves the boundary *backward*: every node ends up in
    the target phase or stays in a *higher* one. The boundary is
    simplified so it contains phase roots only.
    """
    currentroots = repo._phaseroots[targetphase]
    # only nodes currently in a lower phase actually move
    addedroots = [n for n in nodes if repo[n].phase() < targetphase]
    if addedroots:
        currentroots.update(addedroots)
        # keep only the minimal set of roots for this phase
        ctxs = repo.set('roots(%ln::)', currentroots)
        currentroots.intersection_update(ctx.node() for ctx in ctxs)
        # drop the cached per-rev phases; they are stale now
        if '_phaserev' in vars(repo):
            del repo._phaserev
        repo._dirtyphases = True
200
200
201
201
def listphases(repo):
    """List phases root for serialisation over pushkey"""
    keys = {}
    for phase in trackedphases:
        for root in repo._phaseroots[phase]:
            keys[hex(root)] = '%i' % phase
    if repo.ui.configbool('phases', 'publish', True):
        # Advertise that we are a publishing repo. A publishing repo
        # cannot simply pretend to be an old (phase-unaware) repo: a
        # client pushing to it must still push phase boundaries, because
        # a push carries not only changesets but also phase data, and
        # that data may apply to *common* changesets that are not pushed.
        # Simple example:
        #
        #   1) repo A pushes changeset X as draft to repo B
        #   2) repo B makes changeset X public
        #   3) repo B pushes to repo A: X itself is not pushed, but the
        #      fact that X is now public should be
        #
        # The server cannot handle this on its own since it knows nothing
        # about the client's phase data.
        keys['publishing'] = 'True'
    return keys
226
226
def pushphase(repo, nhex, oldphasestr, newphasestr):
    """pushkey handler: try to advance node *nhex* to a lower phase.

    The move is applied only when the node's current local phase matches
    *oldphasestr* and *newphasestr* is strictly lower (phases only
    advance through pushkey). Returns 1 on success, 0 otherwise.
    """
    lock = repo.lock()
    try:
        currentphase = repo[nhex].phase()
        # abs(): avoid negative-index surprises from hostile/buggy input
        newphase = abs(int(newphasestr))
        oldphase = abs(int(oldphasestr))
        if currentphase != oldphase or newphase >= oldphase:
            return 0
        advanceboundary(repo, newphase, [bin(nhex)])
        return 1
    finally:
        lock.release()
241
241
def visibleheads(repo):
    """Return the list of this repo's heads once secret changesets are
    filtered out."""
    # XXX we want a cache on this
    secretroots = repo._phaseroots[secret]
    if not secretroots:
        # nothing is secret: every head is visible
        return repo.heads()
    # XXX very slow revset. storing heads or secret "boundary" would help.
    revset = repo.set('heads(not (%ln::))', secretroots)
    vheads = [ctx.node() for ctx in revset]
    if not vheads:
        # everything is secret; expose the null revision as the only head
        vheads.append(nullid)
    return vheads
256
256
def analyzeremotephases(repo, subset, roots):
    """Compute phase heads and roots within a subset of nodes, from a
    pushkey-style root dict.

    * subset is the heads of the subset considered
    * roots is a {<hex nodeid> => phase} mapping; keys and values are strings

    Unknown nodes in *roots* are tolerated and ignored.
    """
    # turn the flat dict into per-phase root lists, dropping unknown nodes
    phaseroots = [[] for p in allphases]
    for nhex, phasestr in roots.iteritems():
        if nhex == 'publishing':  # ignore data related to the publish option
            continue
        node = bin(nhex)
        if node in repo:
            phaseroots[int(phasestr)].append(node)
    # compute heads: for each phase, the heads of the subset minus
    # everything rooted in the next (higher) phase
    phaseheads = [[] for p in allphases]
    for phase in allphases[:-1]:
        toproof = phaseroots[phase + 1]
        revset = repo.set('heads((%ln + parents(%ln)) - (%ln::%ln))',
                          subset, toproof, toproof, subset)
        phaseheads[phase].extend(c.node() for c in revset)
    return phaseheads, phaseroots
282
282
General Comments 0
You need to be logged in to leave comments. Login now