##// END OF EJS Templates
context: add method to return all bookmarks pointing to a node
David Soria Parra -
r13384:caa56175 default
parent child Browse files
Show More
@@ -1,1103 +1,1105
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import nullid, nullrev, short, hex
8 from node import nullid, nullrev, short, hex
9 from i18n import _
9 from i18n import _
10 import ancestor, bdiff, error, util, subrepo, patch, encoding
10 import ancestor, bdiff, error, util, subrepo, patch, encoding
11 import os, errno, stat
11 import os, errno, stat
12
12
13 propertycache = util.propertycache
13 propertycache = util.propertycache
14
14
class changectx(object):
    """A changecontext object makes access to data related to a particular
    changeset convenient."""

    def __init__(self, repo, changeid=''):
        """changeid is a revision number, node, or tag"""
        if changeid == '':
            # default to the working directory's first parent
            changeid = '.'
        self._repo = repo
        if isinstance(changeid, (long, int)):
            # numeric revision: resolve the node from the changelog
            self._rev = changeid
            self._node = self._repo.changelog.node(changeid)
        else:
            # node, tag, branch name, ... : let the repo resolve it
            self._node = self._repo.lookup(changeid)
            self._rev = self._repo.changelog.rev(self._node)

    def __str__(self):
        return short(self.node())

    def __int__(self):
        return self.rev()

    def __repr__(self):
        return "<changectx %s>" % str(self)

    def __hash__(self):
        try:
            return hash(self._rev)
        except AttributeError:
            # no revision yet (e.g. partially initialized): fall back to
            # object identity so the context stays hashable
            return id(self)

    def __eq__(self, other):
        try:
            return self._rev == other._rev
        except AttributeError:
            # 'other' is not a context-like object
            return False

    def __ne__(self, other):
        return not (self == other)

    def __nonzero__(self):
        # the null revision is falsy
        return self._rev != nullrev

    @propertycache
    def _changeset(self):
        # lazily read the changelog entry for this node
        return self._repo.changelog.read(self.node())

    @propertycache
    def _manifest(self):
        return self._repo.manifest.read(self._changeset[0])

    @propertycache
    def _manifestdelta(self):
        return self._repo.manifest.readdelta(self._changeset[0])

    @propertycache
    def _parents(self):
        p = self._repo.changelog.parentrevs(self._rev)
        if p[1] == nullrev:
            # drop the null second parent for non-merge changesets
            p = p[:-1]
        return [changectx(self._repo, x) for x in p]

    @propertycache
    def substate(self):
        # subrepository state (.hgsubstate) for this revision
        return subrepo.state(self, self._repo.ui)

    def __contains__(self, key):
        return key in self._manifest

    def __getitem__(self, key):
        return self.filectx(key)

    def __iter__(self):
        # iterate over tracked file names in sorted order
        for f in sorted(self._manifest):
            yield f

    # accessors for the raw changelog entry and manifest
    def changeset(self):
        return self._changeset
    def manifest(self):
        return self._manifest
    def manifestnode(self):
        return self._changeset[0]

    # accessors for the changeset metadata; the _changeset tuple is
    # (manifest node, user, date, files, description, extra)
    def rev(self):
        return self._rev
    def node(self):
        return self._node
    def hex(self):
        return hex(self._node)
    def user(self):
        return self._changeset[1]
    def date(self):
        return self._changeset[2]
    def files(self):
        return self._changeset[3]
    def description(self):
        return self._changeset[4]
    def branch(self):
        return encoding.tolocal(self._changeset[5].get("branch"))
    def extra(self):
        return self._changeset[5]
    def tags(self):
        return self._repo.nodetags(self._node)
    def bookmarks(self):
        """return all bookmarks pointing to this changeset"""
        return self._repo.nodebookmarks(self._node)

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        if len(self._parents) == 2:
            return self._parents[1]
        # no second parent: return the null context
        return changectx(self._repo, -1)

    def children(self):
        """return contexts for each child changeset"""
        c = self._repo.changelog.children(self._node)
        return [changectx(self._repo, x) for x in c]

    def ancestors(self):
        """generate a context for each ancestor changeset"""
        for a in self._repo.changelog.ancestors(self._rev):
            yield changectx(self._repo, a)

    def descendants(self):
        """generate a context for each descendant changeset"""
        for d in self._repo.changelog.descendants(self._rev):
            yield changectx(self._repo, d)

    def _fileinfo(self, path):
        """return (filenode, flags) for path, raising LookupError if the
        file is not present in this changeset.

        Tries, in order of cheapness, whatever is already cached: the full
        manifest, then the manifest delta, then a targeted manifest.find().
        """
        if '_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.LookupError(self._node, path,
                                        _('not found in manifest'))
        if '_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return self._manifestdelta[path], self._manifestdelta.flags(path)
        node, flag = self._repo.manifest.find(self._changeset[0], path)
        if not node:
            raise error.LookupError(self._node, path,
                                    _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            # missing files are reported as having no flags
            return ''

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(self._repo, path, fileid=fileid,
                       changectx=self, filelog=filelog)

    def ancestor(self, c2):
        """
        return the ancestor context of self and c2
        """
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        n = self._repo.changelog.ancestor(self._node, n2)
        return changectx(self._repo, n)

    def walk(self, match):
        """generate the file names in this changeset selected by match,
        reporting match.bad() for requested files that do not exist"""
        fset = set(match.files())
        # for dirstate.walk, files=['.'] means "walk the whole tree".
        # follow that here, too
        fset.discard('.')
        for fn in self:
            for ffn in fset:
                # match if the file is the exact name or a directory
                if ffn == fn or fn.startswith("%s/" % ffn):
                    fset.remove(ffn)
                    break
            if match(fn):
                yield fn
        # whatever is left in fset was requested but never seen
        for fn in sorted(fset):
            if match.bad(fn, _('no such file in rev %s') % self) and match(fn):
                yield fn

    def sub(self, path):
        return subrepo.subrepo(self, path)

    def diff(self, ctx2=None, match=None, **opts):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None and not isinstance(ctx2, changectx):
            ctx2 = self._repo[ctx2]
        diffopts = patch.diffopts(self._repo.ui, opts)
        return patch.diff(self._repo, ctx2.node(), self.node(),
                          match=match, opts=diffopts)
216
218
class filectx(object):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""

    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # at least one anchor is required to locate the file revision
        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        if filelog:
            self._filelog = filelog

        # only set the attributes we were given; the rest are derived
        # lazily by the propertycaches below
        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        return changectx(self._repo, self._changeid)

    @propertycache
    def _filelog(self):
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        if '_changectx' in self.__dict__:
            return self._changectx.rev()
        else:
            return self._filelog.linkrev(self._filerev)

    @propertycache
    def _filenode(self):
        if '_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        return self._path

    def __nonzero__(self):
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    def __str__(self):
        return "%s@%s" % (self.path(), short(self.node()))

    def __repr__(self):
        return "<filectx %s>" % str(self)

    def __hash__(self):
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            # filenode not resolvable: fall back to object identity
            return id(self)

    def __eq__(self, other):
        try:
            return (self._path == other._path
                    and self._filenode == other._filenode)
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def filectx(self, fileid):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog)

    # accessors for the file revision itself
    def filerev(self):
        return self._filerev
    def filenode(self):
        return self._filenode
    def flags(self):
        return self._changectx.flags(self._path)
    def filelog(self):
        return self._filelog

    def rev(self):
        """return the changeset revision this file context belongs to"""
        if '_changectx' in self.__dict__:
            return self._changectx.rev()
        if '_changeid' in self.__dict__:
            return self._changectx.rev()
        return self._filelog.linkrev(self._filerev)

    def linkrev(self):
        return self._filelog.linkrev(self._filerev)

    # delegated accessors for changeset-level metadata
    def node(self):
        return self._changectx.node()
    def hex(self):
        return hex(self.node())
    def user(self):
        return self._changectx.user()
    def date(self):
        return self._changectx.date()
    def files(self):
        return self._changectx.files()
    def description(self):
        return self._changectx.description()
    def branch(self):
        return self._changectx.branch()
    def extra(self):
        return self._changectx.extra()
    def manifest(self):
        return self._changectx.manifest()
    def changectx(self):
        return self._changectx

    def data(self):
        return self._filelog.read(self._filenode)
    def path(self):
        return self._path
    def size(self):
        return self._filelog.size(self._filerev)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # a size mismatch is a definitive "different" -- only fall through
        # to a full content comparison when sizes match or the other side
        # is the working directory with encode filters active
        if (fctx._filerev is None and self._repo._encodefilterpats
            or self.size() == fctx.size()):
            return self._filelog.cmp(self._filenode, fctx.data())

        return True

    def renamed(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    # a parent already has this exact file revision, so
                    # this changeset did not introduce the rename
                    return None
            except error.LookupError:
                pass
        return renamed

    def parents(self):
        """return the parent filectxs, following copies where necessary"""
        p = self._path
        fl = self._filelog
        pl = [(p, n, fl) for n in self._filelog.parents(self._filenode)]

        r = self._filelog.renamed(self._filenode)
        if r:
            # the first parent is really the copy source; its filelog is
            # unknown here, so let filectx look it up lazily
            pl[0] = (r[0], r[1], None)

        return [filectx(self._repo, p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]

    def annotate(self, follow=False, linenumber=None):
        '''returns a list of tuples of (ctx, line) for each line
        in the file, where ctx is the filectx of the node where
        that line was last changed.
        This returns tuples of ((ctx, linenumber), line) for each line,
        if "linenumber" parameter is NOT "None".
        In such tuples, linenumber means one at the first appearance
        in the managed file.
        To reduce annotation cost,
        this returns fixed value(False is used) as linenumber,
        if "linenumber" parameter is "False".'''

        def decorate_compat(text, rev):
            return ([rev] * len(text.splitlines()), text)

        def without_linenumber(text, rev):
            return ([(rev, False)] * len(text.splitlines()), text)

        def with_linenumber(text, rev):
            size = len(text.splitlines())
            return ([(rev, i) for i in xrange(1, size + 1)], text)

        # pick the decoration flavor requested by the caller
        decorate = (((linenumber is None) and decorate_compat) or
                    (linenumber and with_linenumber) or
                    without_linenumber)

        def pair(parent, child):
            # propagate parent annotations onto the unchanged regions of
            # the child, as computed by bdiff
            for a1, a2, b1, b2 in bdiff.blocks(parent[1], child[1]):
                child[0][b1:b2] = parent[0][a1:a2]
            return child

        getlog = util.lrucachefunc(lambda x: self._repo.file(x))
        def getctx(path, fileid):
            log = path == self._path and self._filelog or getlog(path)
            return filectx(self._repo, path, fileid=fileid, filelog=log)
        getctx = util.lrucachefunc(getctx)

        def parents(f):
            # we want to reuse filectx objects as much as possible
            p = f._path
            if f._filerev is None: # working dir
                pl = [(n.path(), n.filerev()) for n in f.parents()]
            else:
                pl = [(p, n) for n in f._filelog.parentrevs(f._filerev)]

            if follow:
                r = f.renamed()
                if r:
                    pl[0] = (r[0], getlog(r[0]).rev(r[1]))

            return [getctx(p, n) for p, n in pl if n != nullrev]

        # use linkrev to find the first changeset where self appeared
        if self.rev() != self.linkrev():
            base = self.filectx(self.filerev())
        else:
            base = self

        # find all ancestors
        needed = {base: 1}
        visit = [base]
        files = [base._path]
        while visit:
            f = visit.pop(0)
            for p in parents(f):
                if p not in needed:
                    needed[p] = 1
                    visit.append(p)
                    if p._path not in files:
                        files.append(p._path)
                else:
                    # count how many times we'll use this
                    needed[p] += 1

        # sort by revision (per file) which is a topological order
        visit = []
        for f in files:
            visit.extend(n for n in needed if n._path == f)

        hist = {}
        for f in sorted(visit, key=lambda x: x.rev()):
            curr = decorate(f.data(), f)
            for p in parents(f):
                curr = pair(hist[p], curr)
                # trim the history of unneeded revs
                needed[p] -= 1
                if not needed[p]:
                    del hist[p]
            hist[f] = curr

        return zip(hist[f][0], hist[f][1].splitlines(True))

    def ancestor(self, fc2, actx=None):
        """
        find the common ancestor file context, if any, of self, and fc2

        If actx is given, it must be the changectx of the common ancestor
        of self's and fc2's respective changesets.
        """

        if actx is None:
            actx = self.changectx().ancestor(fc2.changectx())

        # the trivial case: changesets are unrelated, files must be too
        if not actx:
            return None

        # the easy case: no (relevant) renames
        if fc2.path() == self.path() and self.path() in actx:
            return actx[self.path()]
        acache = {}

        # prime the ancestor cache for the working directory
        for c in (self, fc2):
            if c._filerev is None:
                pl = [(n.path(), n.filenode()) for n in c.parents()]
                acache[(c._path, None)] = pl

        flcache = {self._repopath:self._filelog, fc2._repopath:fc2._filelog}
        def parents(vertex):
            if vertex in acache:
                return acache[vertex]
            f, n = vertex
            if f not in flcache:
                flcache[f] = self._repo.file(f)
            fl = flcache[f]
            pl = [(f, p) for p in fl.parents(n) if p != nullid]
            re = fl.renamed(n)
            if re:
                pl.append(re)
            acache[vertex] = pl
            return pl

        a, b = (self._path, self._filenode), (fc2._path, fc2._filenode)
        v = ancestor.ancestor(a, b, parents)
        if v:
            f, n = v
            return filectx(self._repo, f, fileid=n, filelog=flcache[f])

        return None

    def ancestors(self):
        """generate all ancestor filectxs, breadth-first, following copies"""
        # fix: set(str(self)) built a set of *characters* of the string;
        # the intent is a one-element set containing the string itself
        seen = set([str(self)])
        visit = [self]
        while visit:
            for parent in visit.pop(0).parents():
                s = str(parent)
                if s not in seen:
                    visit.append(parent)
                    seen.add(s)
                    yield parent
560
562
561 class workingctx(changectx):
563 class workingctx(changectx):
562 """A workingctx object makes access to data related to
564 """A workingctx object makes access to data related to
563 the current working directory convenient.
565 the current working directory convenient.
564 date - any valid date string or (unixtime, offset), or None.
566 date - any valid date string or (unixtime, offset), or None.
565 user - username string, or None.
567 user - username string, or None.
566 extra - a dictionary of extra values, or None.
568 extra - a dictionary of extra values, or None.
567 changes - a list of file lists as returned by localrepo.status()
569 changes - a list of file lists as returned by localrepo.status()
568 or None to use the repository status.
570 or None to use the repository status.
569 """
571 """
570 def __init__(self, repo, text="", user=None, date=None, extra=None,
572 def __init__(self, repo, text="", user=None, date=None, extra=None,
571 changes=None):
573 changes=None):
572 self._repo = repo
574 self._repo = repo
573 self._rev = None
575 self._rev = None
574 self._node = None
576 self._node = None
575 self._text = text
577 self._text = text
576 if date:
578 if date:
577 self._date = util.parsedate(date)
579 self._date = util.parsedate(date)
578 if user:
580 if user:
579 self._user = user
581 self._user = user
580 if changes:
582 if changes:
581 self._status = list(changes[:4])
583 self._status = list(changes[:4])
582 self._unknown = changes[4]
584 self._unknown = changes[4]
583 self._ignored = changes[5]
585 self._ignored = changes[5]
584 self._clean = changes[6]
586 self._clean = changes[6]
585 else:
587 else:
586 self._unknown = None
588 self._unknown = None
587 self._ignored = None
589 self._ignored = None
588 self._clean = None
590 self._clean = None
589
591
590 self._extra = {}
592 self._extra = {}
591 if extra:
593 if extra:
592 self._extra = extra.copy()
594 self._extra = extra.copy()
593 if 'branch' not in self._extra:
595 if 'branch' not in self._extra:
594 try:
596 try:
595 branch = encoding.fromlocal(self._repo.dirstate.branch())
597 branch = encoding.fromlocal(self._repo.dirstate.branch())
596 except UnicodeDecodeError:
598 except UnicodeDecodeError:
597 raise util.Abort(_('branch name not in UTF-8!'))
599 raise util.Abort(_('branch name not in UTF-8!'))
598 self._extra['branch'] = branch
600 self._extra['branch'] = branch
599 if self._extra['branch'] == '':
601 if self._extra['branch'] == '':
600 self._extra['branch'] = 'default'
602 self._extra['branch'] = 'default'
601
603
602 def __str__(self):
604 def __str__(self):
603 return str(self._parents[0]) + "+"
605 return str(self._parents[0]) + "+"
604
606
605 def __repr__(self):
607 def __repr__(self):
606 return "<workingctx %s>" % str(self)
608 return "<workingctx %s>" % str(self)
607
609
608 def __nonzero__(self):
610 def __nonzero__(self):
609 return True
611 return True
610
612
611 def __contains__(self, key):
613 def __contains__(self, key):
612 return self._repo.dirstate[key] not in "?r"
614 return self._repo.dirstate[key] not in "?r"
613
615
614 @propertycache
616 @propertycache
615 def _manifest(self):
617 def _manifest(self):
616 """generate a manifest corresponding to the working directory"""
618 """generate a manifest corresponding to the working directory"""
617
619
618 if self._unknown is None:
620 if self._unknown is None:
619 self.status(unknown=True)
621 self.status(unknown=True)
620
622
621 man = self._parents[0].manifest().copy()
623 man = self._parents[0].manifest().copy()
622 copied = self._repo.dirstate.copies()
624 copied = self._repo.dirstate.copies()
623 if len(self._parents) > 1:
625 if len(self._parents) > 1:
624 man2 = self.p2().manifest()
626 man2 = self.p2().manifest()
625 def getman(f):
627 def getman(f):
626 if f in man:
628 if f in man:
627 return man
629 return man
628 return man2
630 return man2
629 else:
631 else:
630 getman = lambda f: man
632 getman = lambda f: man
631 def cf(f):
633 def cf(f):
632 f = copied.get(f, f)
634 f = copied.get(f, f)
633 return getman(f).flags(f)
635 return getman(f).flags(f)
634 ff = self._repo.dirstate.flagfunc(cf)
636 ff = self._repo.dirstate.flagfunc(cf)
635 modified, added, removed, deleted = self._status
637 modified, added, removed, deleted = self._status
636 unknown = self._unknown
638 unknown = self._unknown
637 for i, l in (("a", added), ("m", modified), ("u", unknown)):
639 for i, l in (("a", added), ("m", modified), ("u", unknown)):
638 for f in l:
640 for f in l:
639 orig = copied.get(f, f)
641 orig = copied.get(f, f)
640 man[f] = getman(orig).get(orig, nullid) + i
642 man[f] = getman(orig).get(orig, nullid) + i
641 try:
643 try:
642 man.set(f, ff(f))
644 man.set(f, ff(f))
643 except OSError:
645 except OSError:
644 pass
646 pass
645
647
646 for f in deleted + removed:
648 for f in deleted + removed:
647 if f in man:
649 if f in man:
648 del man[f]
650 del man[f]
649
651
650 return man
652 return man
651
653
652 @propertycache
654 @propertycache
653 def _status(self):
655 def _status(self):
654 return self._repo.status()[:4]
656 return self._repo.status()[:4]
655
657
656 @propertycache
658 @propertycache
657 def _user(self):
659 def _user(self):
658 return self._repo.ui.username()
660 return self._repo.ui.username()
659
661
660 @propertycache
662 @propertycache
661 def _date(self):
663 def _date(self):
662 return util.makedate()
664 return util.makedate()
663
665
664 @propertycache
666 @propertycache
665 def _parents(self):
667 def _parents(self):
666 p = self._repo.dirstate.parents()
668 p = self._repo.dirstate.parents()
667 if p[1] == nullid:
669 if p[1] == nullid:
668 p = p[:-1]
670 p = p[:-1]
669 self._parents = [changectx(self._repo, x) for x in p]
671 self._parents = [changectx(self._repo, x) for x in p]
670 return self._parents
672 return self._parents
671
673
672 def status(self, ignored=False, clean=False, unknown=False):
674 def status(self, ignored=False, clean=False, unknown=False):
673 """Explicit status query
675 """Explicit status query
674 Unless this method is used to query the working copy status, the
676 Unless this method is used to query the working copy status, the
675 _status property will implicitly read the status using its default
677 _status property will implicitly read the status using its default
676 arguments."""
678 arguments."""
677 stat = self._repo.status(ignored=ignored, clean=clean, unknown=unknown)
679 stat = self._repo.status(ignored=ignored, clean=clean, unknown=unknown)
678 self._unknown = self._ignored = self._clean = None
680 self._unknown = self._ignored = self._clean = None
679 if unknown:
681 if unknown:
680 self._unknown = stat[4]
682 self._unknown = stat[4]
681 if ignored:
683 if ignored:
682 self._ignored = stat[5]
684 self._ignored = stat[5]
683 if clean:
685 if clean:
684 self._clean = stat[6]
686 self._clean = stat[6]
685 self._status = stat[:4]
687 self._status = stat[:4]
686 return stat
688 return stat
687
689
688 def manifest(self):
690 def manifest(self):
689 return self._manifest
691 return self._manifest
690 def user(self):
692 def user(self):
691 return self._user or self._repo.ui.username()
693 return self._user or self._repo.ui.username()
692 def date(self):
694 def date(self):
693 return self._date
695 return self._date
694 def description(self):
696 def description(self):
695 return self._text
697 return self._text
696 def files(self):
698 def files(self):
697 return sorted(self._status[0] + self._status[1] + self._status[2])
699 return sorted(self._status[0] + self._status[1] + self._status[2])
698
700
699 def modified(self):
701 def modified(self):
700 return self._status[0]
702 return self._status[0]
701 def added(self):
703 def added(self):
702 return self._status[1]
704 return self._status[1]
703 def removed(self):
705 def removed(self):
704 return self._status[2]
706 return self._status[2]
705 def deleted(self):
707 def deleted(self):
706 return self._status[3]
708 return self._status[3]
707 def unknown(self):
709 def unknown(self):
708 assert self._unknown is not None # must call status first
710 assert self._unknown is not None # must call status first
709 return self._unknown
711 return self._unknown
710 def ignored(self):
712 def ignored(self):
711 assert self._ignored is not None # must call status first
713 assert self._ignored is not None # must call status first
712 return self._ignored
714 return self._ignored
713 def clean(self):
715 def clean(self):
714 assert self._clean is not None # must call status first
716 assert self._clean is not None # must call status first
715 return self._clean
717 return self._clean
716 def branch(self):
718 def branch(self):
717 return encoding.tolocal(self._extra['branch'])
719 return encoding.tolocal(self._extra['branch'])
718 def extra(self):
720 def extra(self):
719 return self._extra
721 return self._extra
720
722
721 def tags(self):
723 def tags(self):
722 t = []
724 t = []
723 [t.extend(p.tags()) for p in self.parents()]
725 [t.extend(p.tags()) for p in self.parents()]
724 return t
726 return t
725
727
726 def children(self):
728 def children(self):
727 return []
729 return []
728
730
729 def flags(self, path):
731 def flags(self, path):
730 if '_manifest' in self.__dict__:
732 if '_manifest' in self.__dict__:
731 try:
733 try:
732 return self._manifest.flags(path)
734 return self._manifest.flags(path)
733 except KeyError:
735 except KeyError:
734 return ''
736 return ''
735
737
736 orig = self._repo.dirstate.copies().get(path, path)
738 orig = self._repo.dirstate.copies().get(path, path)
737
739
738 def findflag(ctx):
740 def findflag(ctx):
739 mnode = ctx.changeset()[0]
741 mnode = ctx.changeset()[0]
740 node, flag = self._repo.manifest.find(mnode, orig)
742 node, flag = self._repo.manifest.find(mnode, orig)
741 ff = self._repo.dirstate.flagfunc(lambda x: flag or '')
743 ff = self._repo.dirstate.flagfunc(lambda x: flag or '')
742 try:
744 try:
743 return ff(path)
745 return ff(path)
744 except OSError:
746 except OSError:
745 pass
747 pass
746
748
747 flag = findflag(self._parents[0])
749 flag = findflag(self._parents[0])
748 if flag is None and len(self.parents()) > 1:
750 if flag is None and len(self.parents()) > 1:
749 flag = findflag(self._parents[1])
751 flag = findflag(self._parents[1])
750 if flag is None or self._repo.dirstate[path] == 'r':
752 if flag is None or self._repo.dirstate[path] == 'r':
751 return ''
753 return ''
752 return flag
754 return flag
753
755
754 def filectx(self, path, filelog=None):
756 def filectx(self, path, filelog=None):
755 """get a file context from the working directory"""
757 """get a file context from the working directory"""
756 return workingfilectx(self._repo, path, workingctx=self,
758 return workingfilectx(self._repo, path, workingctx=self,
757 filelog=filelog)
759 filelog=filelog)
758
760
759 def ancestor(self, c2):
761 def ancestor(self, c2):
760 """return the ancestor context of self and c2"""
762 """return the ancestor context of self and c2"""
761 return self._parents[0].ancestor(c2) # punt on two parents for now
763 return self._parents[0].ancestor(c2) # punt on two parents for now
762
764
763 def walk(self, match):
765 def walk(self, match):
764 return sorted(self._repo.dirstate.walk(match, self.substate.keys(),
766 return sorted(self._repo.dirstate.walk(match, self.substate.keys(),
765 True, False))
767 True, False))
766
768
767 def dirty(self, missing=False):
769 def dirty(self, missing=False):
768 "check whether a working directory is modified"
770 "check whether a working directory is modified"
769 # check subrepos first
771 # check subrepos first
770 for s in self.substate:
772 for s in self.substate:
771 if self.sub(s).dirty():
773 if self.sub(s).dirty():
772 return True
774 return True
773 # check current working dir
775 # check current working dir
774 return (self.p2() or self.branch() != self.p1().branch() or
776 return (self.p2() or self.branch() != self.p1().branch() or
775 self.modified() or self.added() or self.removed() or
777 self.modified() or self.added() or self.removed() or
776 (missing and self.deleted()))
778 (missing and self.deleted()))
777
779
778 def add(self, list, prefix=""):
780 def add(self, list, prefix=""):
779 join = lambda f: os.path.join(prefix, f)
781 join = lambda f: os.path.join(prefix, f)
780 wlock = self._repo.wlock()
782 wlock = self._repo.wlock()
781 ui, ds = self._repo.ui, self._repo.dirstate
783 ui, ds = self._repo.ui, self._repo.dirstate
782 try:
784 try:
783 rejected = []
785 rejected = []
784 for f in list:
786 for f in list:
785 p = self._repo.wjoin(f)
787 p = self._repo.wjoin(f)
786 try:
788 try:
787 st = os.lstat(p)
789 st = os.lstat(p)
788 except:
790 except:
789 ui.warn(_("%s does not exist!\n") % join(f))
791 ui.warn(_("%s does not exist!\n") % join(f))
790 rejected.append(f)
792 rejected.append(f)
791 continue
793 continue
792 if st.st_size > 10000000:
794 if st.st_size > 10000000:
793 ui.warn(_("%s: up to %d MB of RAM may be required "
795 ui.warn(_("%s: up to %d MB of RAM may be required "
794 "to manage this file\n"
796 "to manage this file\n"
795 "(use 'hg revert %s' to cancel the "
797 "(use 'hg revert %s' to cancel the "
796 "pending addition)\n")
798 "pending addition)\n")
797 % (f, 3 * st.st_size // 1000000, join(f)))
799 % (f, 3 * st.st_size // 1000000, join(f)))
798 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
800 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
799 ui.warn(_("%s not added: only files and symlinks "
801 ui.warn(_("%s not added: only files and symlinks "
800 "supported currently\n") % join(f))
802 "supported currently\n") % join(f))
801 rejected.append(p)
803 rejected.append(p)
802 elif ds[f] in 'amn':
804 elif ds[f] in 'amn':
803 ui.warn(_("%s already tracked!\n") % join(f))
805 ui.warn(_("%s already tracked!\n") % join(f))
804 elif ds[f] == 'r':
806 elif ds[f] == 'r':
805 ds.normallookup(f)
807 ds.normallookup(f)
806 else:
808 else:
807 ds.add(f)
809 ds.add(f)
808 return rejected
810 return rejected
809 finally:
811 finally:
810 wlock.release()
812 wlock.release()
811
813
812 def forget(self, list):
814 def forget(self, list):
813 wlock = self._repo.wlock()
815 wlock = self._repo.wlock()
814 try:
816 try:
815 for f in list:
817 for f in list:
816 if self._repo.dirstate[f] != 'a':
818 if self._repo.dirstate[f] != 'a':
817 self._repo.ui.warn(_("%s not added!\n") % f)
819 self._repo.ui.warn(_("%s not added!\n") % f)
818 else:
820 else:
819 self._repo.dirstate.forget(f)
821 self._repo.dirstate.forget(f)
820 finally:
822 finally:
821 wlock.release()
823 wlock.release()
822
824
823 def ancestors(self):
825 def ancestors(self):
824 for a in self._repo.changelog.ancestors(
826 for a in self._repo.changelog.ancestors(
825 *[p.rev() for p in self._parents]):
827 *[p.rev() for p in self._parents]):
826 yield changectx(self._repo, a)
828 yield changectx(self._repo, a)
827
829
828 def remove(self, list, unlink=False):
830 def remove(self, list, unlink=False):
829 if unlink:
831 if unlink:
830 for f in list:
832 for f in list:
831 try:
833 try:
832 util.unlinkpath(self._repo.wjoin(f))
834 util.unlinkpath(self._repo.wjoin(f))
833 except OSError, inst:
835 except OSError, inst:
834 if inst.errno != errno.ENOENT:
836 if inst.errno != errno.ENOENT:
835 raise
837 raise
836 wlock = self._repo.wlock()
838 wlock = self._repo.wlock()
837 try:
839 try:
838 for f in list:
840 for f in list:
839 if unlink and os.path.lexists(self._repo.wjoin(f)):
841 if unlink and os.path.lexists(self._repo.wjoin(f)):
840 self._repo.ui.warn(_("%s still exists!\n") % f)
842 self._repo.ui.warn(_("%s still exists!\n") % f)
841 elif self._repo.dirstate[f] == 'a':
843 elif self._repo.dirstate[f] == 'a':
842 self._repo.dirstate.forget(f)
844 self._repo.dirstate.forget(f)
843 elif f not in self._repo.dirstate:
845 elif f not in self._repo.dirstate:
844 self._repo.ui.warn(_("%s not tracked!\n") % f)
846 self._repo.ui.warn(_("%s not tracked!\n") % f)
845 else:
847 else:
846 self._repo.dirstate.remove(f)
848 self._repo.dirstate.remove(f)
847 finally:
849 finally:
848 wlock.release()
850 wlock.release()
849
851
850 def undelete(self, list):
852 def undelete(self, list):
851 pctxs = self.parents()
853 pctxs = self.parents()
852 wlock = self._repo.wlock()
854 wlock = self._repo.wlock()
853 try:
855 try:
854 for f in list:
856 for f in list:
855 if self._repo.dirstate[f] != 'r':
857 if self._repo.dirstate[f] != 'r':
856 self._repo.ui.warn(_("%s not removed!\n") % f)
858 self._repo.ui.warn(_("%s not removed!\n") % f)
857 else:
859 else:
858 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
860 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
859 t = fctx.data()
861 t = fctx.data()
860 self._repo.wwrite(f, t, fctx.flags())
862 self._repo.wwrite(f, t, fctx.flags())
861 self._repo.dirstate.normal(f)
863 self._repo.dirstate.normal(f)
862 finally:
864 finally:
863 wlock.release()
865 wlock.release()
864
866
865 def copy(self, source, dest):
867 def copy(self, source, dest):
866 p = self._repo.wjoin(dest)
868 p = self._repo.wjoin(dest)
867 if not os.path.lexists(p):
869 if not os.path.lexists(p):
868 self._repo.ui.warn(_("%s does not exist!\n") % dest)
870 self._repo.ui.warn(_("%s does not exist!\n") % dest)
869 elif not (os.path.isfile(p) or os.path.islink(p)):
871 elif not (os.path.isfile(p) or os.path.islink(p)):
870 self._repo.ui.warn(_("copy failed: %s is not a file or a "
872 self._repo.ui.warn(_("copy failed: %s is not a file or a "
871 "symbolic link\n") % dest)
873 "symbolic link\n") % dest)
872 else:
874 else:
873 wlock = self._repo.wlock()
875 wlock = self._repo.wlock()
874 try:
876 try:
875 if self._repo.dirstate[dest] in '?r':
877 if self._repo.dirstate[dest] in '?r':
876 self._repo.dirstate.add(dest)
878 self._repo.dirstate.add(dest)
877 self._repo.dirstate.copy(source, dest)
879 self._repo.dirstate.copy(source, dest)
878 finally:
880 finally:
879 wlock.release()
881 wlock.release()
880
882
881 class workingfilectx(filectx):
883 class workingfilectx(filectx):
882 """A workingfilectx object makes access to data related to a particular
884 """A workingfilectx object makes access to data related to a particular
883 file in the working directory convenient."""
885 file in the working directory convenient."""
884 def __init__(self, repo, path, filelog=None, workingctx=None):
886 def __init__(self, repo, path, filelog=None, workingctx=None):
885 """changeid can be a changeset revision, node, or tag.
887 """changeid can be a changeset revision, node, or tag.
886 fileid can be a file revision or node."""
888 fileid can be a file revision or node."""
887 self._repo = repo
889 self._repo = repo
888 self._path = path
890 self._path = path
889 self._changeid = None
891 self._changeid = None
890 self._filerev = self._filenode = None
892 self._filerev = self._filenode = None
891
893
892 if filelog:
894 if filelog:
893 self._filelog = filelog
895 self._filelog = filelog
894 if workingctx:
896 if workingctx:
895 self._changectx = workingctx
897 self._changectx = workingctx
896
898
897 @propertycache
899 @propertycache
898 def _changectx(self):
900 def _changectx(self):
899 return workingctx(self._repo)
901 return workingctx(self._repo)
900
902
901 def __nonzero__(self):
903 def __nonzero__(self):
902 return True
904 return True
903
905
904 def __str__(self):
906 def __str__(self):
905 return "%s@%s" % (self.path(), self._changectx)
907 return "%s@%s" % (self.path(), self._changectx)
906
908
907 def __repr__(self):
909 def __repr__(self):
908 return "<workingfilectx %s>" % str(self)
910 return "<workingfilectx %s>" % str(self)
909
911
910 def data(self):
912 def data(self):
911 return self._repo.wread(self._path)
913 return self._repo.wread(self._path)
912 def renamed(self):
914 def renamed(self):
913 rp = self._repo.dirstate.copied(self._path)
915 rp = self._repo.dirstate.copied(self._path)
914 if not rp:
916 if not rp:
915 return None
917 return None
916 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
918 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
917
919
918 def parents(self):
920 def parents(self):
919 '''return parent filectxs, following copies if necessary'''
921 '''return parent filectxs, following copies if necessary'''
920 def filenode(ctx, path):
922 def filenode(ctx, path):
921 return ctx._manifest.get(path, nullid)
923 return ctx._manifest.get(path, nullid)
922
924
923 path = self._path
925 path = self._path
924 fl = self._filelog
926 fl = self._filelog
925 pcl = self._changectx._parents
927 pcl = self._changectx._parents
926 renamed = self.renamed()
928 renamed = self.renamed()
927
929
928 if renamed:
930 if renamed:
929 pl = [renamed + (None,)]
931 pl = [renamed + (None,)]
930 else:
932 else:
931 pl = [(path, filenode(pcl[0], path), fl)]
933 pl = [(path, filenode(pcl[0], path), fl)]
932
934
933 for pc in pcl[1:]:
935 for pc in pcl[1:]:
934 pl.append((path, filenode(pc, path), fl))
936 pl.append((path, filenode(pc, path), fl))
935
937
936 return [filectx(self._repo, p, fileid=n, filelog=l)
938 return [filectx(self._repo, p, fileid=n, filelog=l)
937 for p, n, l in pl if n != nullid]
939 for p, n, l in pl if n != nullid]
938
940
939 def children(self):
941 def children(self):
940 return []
942 return []
941
943
942 def size(self):
944 def size(self):
943 return os.lstat(self._repo.wjoin(self._path)).st_size
945 return os.lstat(self._repo.wjoin(self._path)).st_size
944 def date(self):
946 def date(self):
945 t, tz = self._changectx.date()
947 t, tz = self._changectx.date()
946 try:
948 try:
947 return (int(os.lstat(self._repo.wjoin(self._path)).st_mtime), tz)
949 return (int(os.lstat(self._repo.wjoin(self._path)).st_mtime), tz)
948 except OSError, err:
950 except OSError, err:
949 if err.errno != errno.ENOENT:
951 if err.errno != errno.ENOENT:
950 raise
952 raise
951 return (t, tz)
953 return (t, tz)
952
954
953 def cmp(self, fctx):
955 def cmp(self, fctx):
954 """compare with other file context
956 """compare with other file context
955
957
956 returns True if different than fctx.
958 returns True if different than fctx.
957 """
959 """
958 # fctx should be a filectx (not a wfctx)
960 # fctx should be a filectx (not a wfctx)
959 # invert comparison to reuse the same code path
961 # invert comparison to reuse the same code path
960 return fctx.cmp(self)
962 return fctx.cmp(self)
961
963
962 class memctx(object):
964 class memctx(object):
963 """Use memctx to perform in-memory commits via localrepo.commitctx().
965 """Use memctx to perform in-memory commits via localrepo.commitctx().
964
966
965 Revision information is supplied at initialization time while
967 Revision information is supplied at initialization time while
966 related files data and is made available through a callback
968 related files data and is made available through a callback
967 mechanism. 'repo' is the current localrepo, 'parents' is a
969 mechanism. 'repo' is the current localrepo, 'parents' is a
968 sequence of two parent revisions identifiers (pass None for every
970 sequence of two parent revisions identifiers (pass None for every
969 missing parent), 'text' is the commit message and 'files' lists
971 missing parent), 'text' is the commit message and 'files' lists
970 names of files touched by the revision (normalized and relative to
972 names of files touched by the revision (normalized and relative to
971 repository root).
973 repository root).
972
974
973 filectxfn(repo, memctx, path) is a callable receiving the
975 filectxfn(repo, memctx, path) is a callable receiving the
974 repository, the current memctx object and the normalized path of
976 repository, the current memctx object and the normalized path of
975 requested file, relative to repository root. It is fired by the
977 requested file, relative to repository root. It is fired by the
976 commit function for every file in 'files', but calls order is
978 commit function for every file in 'files', but calls order is
977 undefined. If the file is available in the revision being
979 undefined. If the file is available in the revision being
978 committed (updated or added), filectxfn returns a memfilectx
980 committed (updated or added), filectxfn returns a memfilectx
979 object. If the file was removed, filectxfn raises an
981 object. If the file was removed, filectxfn raises an
980 IOError. Moved files are represented by marking the source file
982 IOError. Moved files are represented by marking the source file
981 removed and the new file added with copy information (see
983 removed and the new file added with copy information (see
982 memfilectx).
984 memfilectx).
983
985
984 user receives the committer name and defaults to current
986 user receives the committer name and defaults to current
985 repository username, date is the commit date in any format
987 repository username, date is the commit date in any format
986 supported by util.parsedate() and defaults to current date, extra
988 supported by util.parsedate() and defaults to current date, extra
987 is a dictionary of metadata or is left empty.
989 is a dictionary of metadata or is left empty.
988 """
990 """
989 def __init__(self, repo, parents, text, files, filectxfn, user=None,
991 def __init__(self, repo, parents, text, files, filectxfn, user=None,
990 date=None, extra=None):
992 date=None, extra=None):
991 self._repo = repo
993 self._repo = repo
992 self._rev = None
994 self._rev = None
993 self._node = None
995 self._node = None
994 self._text = text
996 self._text = text
995 self._date = date and util.parsedate(date) or util.makedate()
997 self._date = date and util.parsedate(date) or util.makedate()
996 self._user = user
998 self._user = user
997 parents = [(p or nullid) for p in parents]
999 parents = [(p or nullid) for p in parents]
998 p1, p2 = parents
1000 p1, p2 = parents
999 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1001 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1000 files = sorted(set(files))
1002 files = sorted(set(files))
1001 self._status = [files, [], [], [], []]
1003 self._status = [files, [], [], [], []]
1002 self._filectxfn = filectxfn
1004 self._filectxfn = filectxfn
1003
1005
1004 self._extra = extra and extra.copy() or {}
1006 self._extra = extra and extra.copy() or {}
1005 if 'branch' not in self._extra:
1007 if 'branch' not in self._extra:
1006 self._extra['branch'] = 'default'
1008 self._extra['branch'] = 'default'
1007 elif self._extra.get('branch') == '':
1009 elif self._extra.get('branch') == '':
1008 self._extra['branch'] = 'default'
1010 self._extra['branch'] = 'default'
1009
1011
1010 def __str__(self):
1012 def __str__(self):
1011 return str(self._parents[0]) + "+"
1013 return str(self._parents[0]) + "+"
1012
1014
1013 def __int__(self):
1015 def __int__(self):
1014 return self._rev
1016 return self._rev
1015
1017
1016 def __nonzero__(self):
1018 def __nonzero__(self):
1017 return True
1019 return True
1018
1020
1019 def __getitem__(self, key):
1021 def __getitem__(self, key):
1020 return self.filectx(key)
1022 return self.filectx(key)
1021
1023
1022 def p1(self):
1024 def p1(self):
1023 return self._parents[0]
1025 return self._parents[0]
1024 def p2(self):
1026 def p2(self):
1025 return self._parents[1]
1027 return self._parents[1]
1026
1028
1027 def user(self):
1029 def user(self):
1028 return self._user or self._repo.ui.username()
1030 return self._user or self._repo.ui.username()
1029 def date(self):
1031 def date(self):
1030 return self._date
1032 return self._date
1031 def description(self):
1033 def description(self):
1032 return self._text
1034 return self._text
1033 def files(self):
1035 def files(self):
1034 return self.modified()
1036 return self.modified()
1035 def modified(self):
1037 def modified(self):
1036 return self._status[0]
1038 return self._status[0]
1037 def added(self):
1039 def added(self):
1038 return self._status[1]
1040 return self._status[1]
1039 def removed(self):
1041 def removed(self):
1040 return self._status[2]
1042 return self._status[2]
1041 def deleted(self):
1043 def deleted(self):
1042 return self._status[3]
1044 return self._status[3]
1043 def unknown(self):
1045 def unknown(self):
1044 return self._status[4]
1046 return self._status[4]
1045 def ignored(self):
1047 def ignored(self):
1046 return self._status[5]
1048 return self._status[5]
1047 def clean(self):
1049 def clean(self):
1048 return self._status[6]
1050 return self._status[6]
1049 def branch(self):
1051 def branch(self):
1050 return encoding.tolocal(self._extra['branch'])
1052 return encoding.tolocal(self._extra['branch'])
1051 def extra(self):
1053 def extra(self):
1052 return self._extra
1054 return self._extra
1053 def flags(self, f):
1055 def flags(self, f):
1054 return self[f].flags()
1056 return self[f].flags()
1055
1057
1056 def parents(self):
1058 def parents(self):
1057 """return contexts for each parent changeset"""
1059 """return contexts for each parent changeset"""
1058 return self._parents
1060 return self._parents
1059
1061
1060 def filectx(self, path, filelog=None):
1062 def filectx(self, path, filelog=None):
1061 """get a file context from the working directory"""
1063 """get a file context from the working directory"""
1062 return self._filectxfn(self._repo, self, path)
1064 return self._filectxfn(self._repo, self, path)
1063
1065
1064 def commit(self):
1066 def commit(self):
1065 """commit context to the repo"""
1067 """commit context to the repo"""
1066 return self._repo.commitctx(self)
1068 return self._repo.commitctx(self)
1067
1069
1068 class memfilectx(object):
1070 class memfilectx(object):
1069 """memfilectx represents an in-memory file to commit.
1071 """memfilectx represents an in-memory file to commit.
1070
1072
1071 See memctx for more details.
1073 See memctx for more details.
1072 """
1074 """
1073 def __init__(self, path, data, islink=False, isexec=False, copied=None):
1075 def __init__(self, path, data, islink=False, isexec=False, copied=None):
1074 """
1076 """
1075 path is the normalized file path relative to repository root.
1077 path is the normalized file path relative to repository root.
1076 data is the file content as a string.
1078 data is the file content as a string.
1077 islink is True if the file is a symbolic link.
1079 islink is True if the file is a symbolic link.
1078 isexec is True if the file is executable.
1080 isexec is True if the file is executable.
1079 copied is the source file path if current file was copied in the
1081 copied is the source file path if current file was copied in the
1080 revision being committed, or None."""
1082 revision being committed, or None."""
1081 self._path = path
1083 self._path = path
1082 self._data = data
1084 self._data = data
1083 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
1085 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
1084 self._copied = None
1086 self._copied = None
1085 if copied:
1087 if copied:
1086 self._copied = (copied, nullid)
1088 self._copied = (copied, nullid)
1087
1089
1088 def __nonzero__(self):
1090 def __nonzero__(self):
1089 return True
1091 return True
1090 def __str__(self):
1092 def __str__(self):
1091 return "%s@%s" % (self.path(), self._changectx)
1093 return "%s@%s" % (self.path(), self._changectx)
1092 def path(self):
1094 def path(self):
1093 return self._path
1095 return self._path
1094 def data(self):
1096 def data(self):
1095 return self._data
1097 return self._data
1096 def flags(self):
1098 def flags(self):
1097 return self._flags
1099 return self._flags
1098 def isexec(self):
1100 def isexec(self):
1099 return 'x' in self._flags
1101 return 'x' in self._flags
1100 def islink(self):
1102 def islink(self):
1101 return 'l' in self._flags
1103 return 'l' in self._flags
1102 def renamed(self):
1104 def renamed(self):
1103 return self._copied
1105 return self._copied
@@ -1,2006 +1,2013
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup, subrepo, discovery, pushkey
10 import repo, changegroup, subrepo, discovery, pushkey
11 import changelog, dirstate, filelog, manifest, context, bookmarks
11 import changelog, dirstate, filelog, manifest, context, bookmarks
12 import lock, transaction, store, encoding
12 import lock, transaction, store, encoding
13 import util, extensions, hook, error
13 import util, extensions, hook, error
14 import match as matchmod
14 import match as matchmod
15 import merge as mergemod
15 import merge as mergemod
16 import tags as tagsmod
16 import tags as tagsmod
17 import url as urlmod
17 import url as urlmod
18 from lock import release
18 from lock import release
19 import weakref, errno, os, time, inspect
19 import weakref, errno, os, time, inspect
# shorthand for util's lazily-computed, cached attribute decorator
propertycache = util.propertycache
21
21
22 class localrepository(repo.repository):
22 class localrepository(repo.repository):
    # wire-protocol capabilities advertised by this repository
    capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey'))
    # on-disk revlog format requirements this code can create
    supportedformats = set(('revlogv1', 'parentdelta'))
    # every requirement this code can read, including store-layout variants
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))
27
27
    def __init__(self, baseui, path=None, create=0):
        """Open the repository at path, creating it first when create is set.

        Raises error.RepoError when the repository is missing (and create
        is false), already exists (and create is true), or declares an
        unsupported requirement.
        """
        repo.repository.__init__(self)
        self.root = os.path.realpath(util.expandpath(path))
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.auditor = util.path_auditor(self.root, self._checknested)
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)
        self.baseui = baseui
        self.ui = baseui.copy()

        # read per-repository config; a missing .hg/hgrc is not an error
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    util.makedirs(path)
                os.mkdir(self.path)
                # pick the on-disk format requirements from configuration
                requirements = ["revlogv1"]
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.opener("00changelog.i", "a").write(
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'parentdelta', False):
                    requirements.append("parentdelta")
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            requirements = set()
            try:
                requirements = set(self.opener("requires").read().splitlines())
            except IOError, inst:
                # a missing requires file means an old, requirement-free repo
                if inst.errno != errno.ENOENT:
                    raise
            for r in requirements - self.supported:
                raise error.RepoError(_("requirement '%s' not supported") % r)

        # honor a share: the store may live in another repository
        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener("sharedpath").read())
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, util.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()

        # These two define the set of tags for this repository. _tags
        # maps tag name to node; _tagtypes maps tag name to 'global' or
        # 'local'. (Global tags are defined by .hgtags across all
        # heads, and local tags are defined in .hg/localtags.) They
        # constitute the in-memory cache of tags.
        self._tags = None
        self._tagtypes = None

        # branch-head cache and assorted per-instance caches
        self._branchcache = None
        self._branchcachetip = None
        self.nodetagscache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None
114
114
115 def _applyrequirements(self, requirements):
115 def _applyrequirements(self, requirements):
116 self.requirements = requirements
116 self.requirements = requirements
117 self.sopener.options = {}
117 self.sopener.options = {}
118 if 'parentdelta' in requirements:
118 if 'parentdelta' in requirements:
119 self.sopener.options['parentdelta'] = 1
119 self.sopener.options['parentdelta'] = 1
120
120
121 def _writerequirements(self):
121 def _writerequirements(self):
122 reqfile = self.opener("requires", "w")
122 reqfile = self.opener("requires", "w")
123 for r in self.requirements:
123 for r in self.requirements:
124 reqfile.write("%s\n" % r)
124 reqfile.write("%s\n" % r)
125 reqfile.close()
125 reqfile.close()
126
126
127 def _checknested(self, path):
127 def _checknested(self, path):
128 """Determine if path is a legal nested repository."""
128 """Determine if path is a legal nested repository."""
129 if not path.startswith(self.root):
129 if not path.startswith(self.root):
130 return False
130 return False
131 subpath = path[len(self.root) + 1:]
131 subpath = path[len(self.root) + 1:]
132
132
133 # XXX: Checking against the current working copy is wrong in
133 # XXX: Checking against the current working copy is wrong in
134 # the sense that it can reject things like
134 # the sense that it can reject things like
135 #
135 #
136 # $ hg cat -r 10 sub/x.txt
136 # $ hg cat -r 10 sub/x.txt
137 #
137 #
138 # if sub/ is no longer a subrepository in the working copy
138 # if sub/ is no longer a subrepository in the working copy
139 # parent revision.
139 # parent revision.
140 #
140 #
141 # However, it can of course also allow things that would have
141 # However, it can of course also allow things that would have
142 # been rejected before, such as the above cat command if sub/
142 # been rejected before, such as the above cat command if sub/
143 # is a subrepository now, but was a normal directory before.
143 # is a subrepository now, but was a normal directory before.
144 # The old path auditor would have rejected by mistake since it
144 # The old path auditor would have rejected by mistake since it
145 # panics when it sees sub/.hg/.
145 # panics when it sees sub/.hg/.
146 #
146 #
147 # All in all, checking against the working copy seems sensible
147 # All in all, checking against the working copy seems sensible
148 # since we want to prevent access to nested repositories on
148 # since we want to prevent access to nested repositories on
149 # the filesystem *now*.
149 # the filesystem *now*.
150 ctx = self[None]
150 ctx = self[None]
151 parts = util.splitpath(subpath)
151 parts = util.splitpath(subpath)
152 while parts:
152 while parts:
153 prefix = os.sep.join(parts)
153 prefix = os.sep.join(parts)
154 if prefix in ctx.substate:
154 if prefix in ctx.substate:
155 if prefix == subpath:
155 if prefix == subpath:
156 return True
156 return True
157 else:
157 else:
158 sub = ctx.sub(prefix)
158 sub = ctx.sub(prefix)
159 return sub.checknested(subpath[len(prefix) + 1:])
159 return sub.checknested(subpath[len(prefix) + 1:])
160 else:
160 else:
161 parts.pop()
161 parts.pop()
162 return False
162 return False
163
163
    @util.propertycache
    def _bookmarks(self):
        # lazily-loaded mapping of bookmark name to node
        return bookmarks.read(self)
167
167
    @util.propertycache
    def _bookmarkcurrent(self):
        # the currently active bookmark, as reported by the bookmarks module
        return bookmarks.readcurrent(self)
171
171
172 @propertycache
172 @propertycache
173 def changelog(self):
173 def changelog(self):
174 c = changelog.changelog(self.sopener)
174 c = changelog.changelog(self.sopener)
175 if 'HG_PENDING' in os.environ:
175 if 'HG_PENDING' in os.environ:
176 p = os.environ['HG_PENDING']
176 p = os.environ['HG_PENDING']
177 if p.startswith(self.root):
177 if p.startswith(self.root):
178 c.readpending('00changelog.i.a')
178 c.readpending('00changelog.i.a')
179 self.sopener.options['defversion'] = c.version
179 self.sopener.options['defversion'] = c.version
180 return c
180 return c
181
181
    @propertycache
    def manifest(self):
        # lazily-opened manifest revlog
        return manifest.manifest(self.sopener)
185
185
186 @propertycache
186 @propertycache
187 def dirstate(self):
187 def dirstate(self):
188 warned = [0]
188 warned = [0]
189 def validate(node):
189 def validate(node):
190 try:
190 try:
191 r = self.changelog.rev(node)
191 r = self.changelog.rev(node)
192 return node
192 return node
193 except error.LookupError:
193 except error.LookupError:
194 if not warned[0]:
194 if not warned[0]:
195 warned[0] = True
195 warned[0] = True
196 self.ui.warn(_("warning: ignoring unknown"
196 self.ui.warn(_("warning: ignoring unknown"
197 " working parent %s!\n") % short(node))
197 " working parent %s!\n") % short(node))
198 return nullid
198 return nullid
199
199
200 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
200 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
201
201
    def __getitem__(self, changeid):
        """repo[changeid] -> change context; repo[None] is the working dir."""
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)
206
206
    def __contains__(self, changeid):
        # a changeid is "in" the repository if lookup() can resolve it
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False
212
212
    def __nonzero__(self):
        # a repository object is always truthy, even when it has no revisions
        return True
215
215
    def __len__(self):
        # number of revisions in the repository
        return len(self.changelog)
218
218
    def __iter__(self):
        # iterate over all revision numbers, oldest first
        for i in xrange(len(self)):
            yield i
222
222
    def url(self):
        # local repositories are addressed with the file: scheme
        return 'file:' + self.root
225
225
    def hook(self, name, throw=False, **args):
        # delegate to the hook module; 'throw' presumably makes a failing
        # hook raise instead of just returning — confirm in hook.py
        return hook.hook(self.ui, self, name, throw, **args)
228
228
    # characters that may never appear in a tag name
    tag_disallowed = ':\r\n'
230
230
    def _tag(self, names, node, message, local, user, date, extra={}):
        """Record one or more tags for node, locally or in .hgtags.

        Runs the 'pretag' hook (which may abort) for every name, writes
        the tag entries, and runs the 'tag' hook afterwards.  Returns the
        node of the tagging commit, or None for local tags.
        """
        # note: extra is never mutated here, only passed through to
        # commit(), so the shared default dict is safe
        # accept a single tag name as well as a sequence of names
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            # append tag lines at EOF, making sure the existing content
            # ends with a newline first
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagtypes and name in self._tagtypes:
                    # the tag already exists: record its previous value
                    # so the old target stays recoverable from history
                    old = self._tags.get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            # open for read+append if present, else create
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError:
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        # commit only the .hgtags change
        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode
296
296
297 def tag(self, names, node, message, local, user, date):
297 def tag(self, names, node, message, local, user, date):
298 '''tag a revision with one or more symbolic names.
298 '''tag a revision with one or more symbolic names.
299
299
300 names is a list of strings or, when adding a single tag, names may be a
300 names is a list of strings or, when adding a single tag, names may be a
301 string.
301 string.
302
302
303 if local is True, the tags are stored in a per-repository file.
303 if local is True, the tags are stored in a per-repository file.
304 otherwise, they are stored in the .hgtags file, and a new
304 otherwise, they are stored in the .hgtags file, and a new
305 changeset is committed with the change.
305 changeset is committed with the change.
306
306
307 keyword arguments:
307 keyword arguments:
308
308
309 local: whether to store tags in non-version-controlled file
309 local: whether to store tags in non-version-controlled file
310 (default False)
310 (default False)
311
311
312 message: commit message to use if committing
312 message: commit message to use if committing
313
313
314 user: name of user to use if committing
314 user: name of user to use if committing
315
315
316 date: date tuple to use if committing'''
316 date: date tuple to use if committing'''
317
317
318 if not local:
318 if not local:
319 for x in self.status()[:5]:
319 for x in self.status()[:5]:
320 if '.hgtags' in x:
320 if '.hgtags' in x:
321 raise util.Abort(_('working copy of .hgtags is changed '
321 raise util.Abort(_('working copy of .hgtags is changed '
322 '(please commit .hgtags manually)'))
322 '(please commit .hgtags manually)'))
323
323
324 self.tags() # instantiate the cache
324 self.tags() # instantiate the cache
325 self._tag(names, node, message, local, user, date)
325 self._tag(names, node, message, local, user, date)
326
326
327 def tags(self):
327 def tags(self):
328 '''return a mapping of tag to node'''
328 '''return a mapping of tag to node'''
329 if self._tags is None:
329 if self._tags is None:
330 (self._tags, self._tagtypes) = self._findtags()
330 (self._tags, self._tagtypes) = self._findtags()
331
331
332 return self._tags
332 return self._tags
333
333
334 def _findtags(self):
334 def _findtags(self):
335 '''Do the hard work of finding tags. Return a pair of dicts
335 '''Do the hard work of finding tags. Return a pair of dicts
336 (tags, tagtypes) where tags maps tag name to node, and tagtypes
336 (tags, tagtypes) where tags maps tag name to node, and tagtypes
337 maps tag name to a string like \'global\' or \'local\'.
337 maps tag name to a string like \'global\' or \'local\'.
338 Subclasses or extensions are free to add their own tags, but
338 Subclasses or extensions are free to add their own tags, but
339 should be aware that the returned dicts will be retained for the
339 should be aware that the returned dicts will be retained for the
340 duration of the localrepo object.'''
340 duration of the localrepo object.'''
341
341
342 # XXX what tagtype should subclasses/extensions use? Currently
342 # XXX what tagtype should subclasses/extensions use? Currently
343 # mq and bookmarks add tags, but do not set the tagtype at all.
343 # mq and bookmarks add tags, but do not set the tagtype at all.
344 # Should each extension invent its own tag type? Should there
344 # Should each extension invent its own tag type? Should there
345 # be one tagtype for all such "virtual" tags? Or is the status
345 # be one tagtype for all such "virtual" tags? Or is the status
346 # quo fine?
346 # quo fine?
347
347
348 alltags = {} # map tag name to (node, hist)
348 alltags = {} # map tag name to (node, hist)
349 tagtypes = {}
349 tagtypes = {}
350
350
351 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
351 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
352 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
352 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
353
353
354 # Build the return dicts. Have to re-encode tag names because
354 # Build the return dicts. Have to re-encode tag names because
355 # the tags module always uses UTF-8 (in order not to lose info
355 # the tags module always uses UTF-8 (in order not to lose info
356 # writing to the cache), but the rest of Mercurial wants them in
356 # writing to the cache), but the rest of Mercurial wants them in
357 # local encoding.
357 # local encoding.
358 tags = {}
358 tags = {}
359 for (name, (node, hist)) in alltags.iteritems():
359 for (name, (node, hist)) in alltags.iteritems():
360 if node != nullid:
360 if node != nullid:
361 tags[encoding.tolocal(name)] = node
361 tags[encoding.tolocal(name)] = node
362 tags['tip'] = self.changelog.tip()
362 tags['tip'] = self.changelog.tip()
363 tags.update(self._bookmarks)
363 tags.update(self._bookmarks)
364 tagtypes = dict([(encoding.tolocal(name), value)
364 tagtypes = dict([(encoding.tolocal(name), value)
365 for (name, value) in tagtypes.iteritems()])
365 for (name, value) in tagtypes.iteritems()])
366 return (tags, tagtypes)
366 return (tags, tagtypes)
367
367
    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        # force the _tagtypes cache to be populated
        self.tags()

        return self._tagtypes.get(tagname)
380
380
381 def tagslist(self):
381 def tagslist(self):
382 '''return a list of tags ordered by revision'''
382 '''return a list of tags ordered by revision'''
383 l = []
383 l = []
384 for t, n in self.tags().iteritems():
384 for t, n in self.tags().iteritems():
385 try:
385 try:
386 r = self.changelog.rev(n)
386 r = self.changelog.rev(n)
387 except:
387 except:
388 r = -2 # sort to the beginning of the list if unknown
388 r = -2 # sort to the beginning of the list if unknown
389 l.append((r, t, n))
389 l.append((r, t, n))
390 return [(t, n) for r, t, n in sorted(l)]
390 return [(t, n) for r, t, n in sorted(l)]
391
391
392 def nodetags(self, node):
392 def nodetags(self, node):
393 '''return the tags associated with a node'''
393 '''return the tags associated with a node'''
394 if not self.nodetagscache:
394 if not self.nodetagscache:
395 self.nodetagscache = {}
395 self.nodetagscache = {}
396 for t, n in self.tags().iteritems():
396 for t, n in self.tags().iteritems():
397 self.nodetagscache.setdefault(n, []).append(t)
397 self.nodetagscache.setdefault(n, []).append(t)
398 for tags in self.nodetagscache.itervalues():
398 for tags in self.nodetagscache.itervalues():
399 tags.sort()
399 tags.sort()
400 return self.nodetagscache.get(node, [])
400 return self.nodetagscache.get(node, [])
401
401
402 def nodebookmarks(self, node):
403 marks = []
404 for bookmark, n in self._bookmarks.iteritems():
405 if n == node:
406 marks.append(bookmark)
407 return sorted(marks)
408
    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        # Bring the partial branch-head map (valid up to revision lrev)
        # up to date with the changelog tip, persisting the result when
        # anything new was folded in.  Returns partial for convenience.
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial
411
418
412 def updatebranchcache(self):
419 def updatebranchcache(self):
413 tip = self.changelog.tip()
420 tip = self.changelog.tip()
414 if self._branchcache is not None and self._branchcachetip == tip:
421 if self._branchcache is not None and self._branchcachetip == tip:
415 return self._branchcache
422 return self._branchcache
416
423
417 oldtip = self._branchcachetip
424 oldtip = self._branchcachetip
418 self._branchcachetip = tip
425 self._branchcachetip = tip
419 if oldtip is None or oldtip not in self.changelog.nodemap:
426 if oldtip is None or oldtip not in self.changelog.nodemap:
420 partial, last, lrev = self._readbranchcache()
427 partial, last, lrev = self._readbranchcache()
421 else:
428 else:
422 lrev = self.changelog.rev(oldtip)
429 lrev = self.changelog.rev(oldtip)
423 partial = self._branchcache
430 partial = self._branchcache
424
431
425 self._branchtags(partial, lrev)
432 self._branchtags(partial, lrev)
426 # this private cache holds all heads (not just tips)
433 # this private cache holds all heads (not just tips)
427 self._branchcache = partial
434 self._branchcache = partial
428
435
    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        # refresh the cache before exposing it
        self.updatebranchcache()
        return self._branchcache
433
440
434 def branchtags(self):
441 def branchtags(self):
435 '''return a dict where branch names map to the tipmost head of
442 '''return a dict where branch names map to the tipmost head of
436 the branch, open heads come before closed'''
443 the branch, open heads come before closed'''
437 bt = {}
444 bt = {}
438 for bn, heads in self.branchmap().iteritems():
445 for bn, heads in self.branchmap().iteritems():
439 tip = heads[-1]
446 tip = heads[-1]
440 for h in reversed(heads):
447 for h in reversed(heads):
441 if 'close' not in self.changelog.read(h)[5]:
448 if 'close' not in self.changelog.read(h)[5]:
442 tip = h
449 tip = h
443 break
450 break
444 bt[bn] = tip
451 bt[bn] = tip
445 return bt
452 return bt
446
453
    def _readbranchcache(self):
        """Read the branch-head cache from disk.

        Returns (partial, last, lrev): partial maps branch name to a list
        of head nodes, last/lrev identify the tip node and revision the
        cache was valid for.  Any problem yields an empty, invalid cache
        instead of an error.
        """
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            # first line is "<tip hex> <tip rev>", used to validate the cache
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            # remaining lines are "<head hex> <branch name>"
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            # a corrupt cache is not fatal: warn (when debugging) and rebuild
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
475
482
476 def _writebranchcache(self, branches, tip, tiprev):
483 def _writebranchcache(self, branches, tip, tiprev):
477 try:
484 try:
478 f = self.opener("cache/branchheads", "w", atomictemp=True)
485 f = self.opener("cache/branchheads", "w", atomictemp=True)
479 f.write("%s %s\n" % (hex(tip), tiprev))
486 f.write("%s %s\n" % (hex(tip), tiprev))
480 for label, nodes in branches.iteritems():
487 for label, nodes in branches.iteritems():
481 for node in nodes:
488 for node in nodes:
482 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
489 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
483 f.rename()
490 f.rename()
484 except (IOError, OSError):
491 except (IOError, OSError):
485 pass
492 pass
486
493
    def _updatebranchcache(self, partial, ctxgen):
        """Fold the changesets yielded by ctxgen into the partial
        branch-head map, pruning heads made obsolete by the new nodes."""
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) <= 1:
                continue
            # starting from tip means fewer passes over reachable
            while newnodes:
                latest = newnodes.pop()
                if latest not in bheads:
                    continue
                # only heads at or above the lowest-revision head can be
                # invalidated by 'latest', so bound the reachability walk
                minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
                reachable = self.changelog.reachable(latest, minbhrev)
                reachable.remove(latest)
                bheads = [b for b in bheads if b not in reachable]
            partial[branch] = bheads
510
517
511 def lookup(self, key):
518 def lookup(self, key):
512 if isinstance(key, int):
519 if isinstance(key, int):
513 return self.changelog.node(key)
520 return self.changelog.node(key)
514 elif key == '.':
521 elif key == '.':
515 return self.dirstate.parents()[0]
522 return self.dirstate.parents()[0]
516 elif key == 'null':
523 elif key == 'null':
517 return nullid
524 return nullid
518 elif key == 'tip':
525 elif key == 'tip':
519 return self.changelog.tip()
526 return self.changelog.tip()
520 n = self.changelog._match(key)
527 n = self.changelog._match(key)
521 if n:
528 if n:
522 return n
529 return n
523 if key in self._bookmarks:
530 if key in self._bookmarks:
524 return self._bookmarks[key]
531 return self._bookmarks[key]
525 if key in self.tags():
532 if key in self.tags():
526 return self.tags()[key]
533 return self.tags()[key]
527 if key in self.branchtags():
534 if key in self.branchtags():
528 return self.branchtags()[key]
535 return self.branchtags()[key]
529 n = self.changelog._partialmatch(key)
536 n = self.changelog._partialmatch(key)
530 if n:
537 if n:
531 return n
538 return n
532
539
533 # can't find key, check if it might have come from damaged dirstate
540 # can't find key, check if it might have come from damaged dirstate
534 if key in self.dirstate.parents():
541 if key in self.dirstate.parents():
535 raise error.Abort(_("working directory has unknown parent '%s'!")
542 raise error.Abort(_("working directory has unknown parent '%s'!")
536 % short(key))
543 % short(key))
537 try:
544 try:
538 if len(key) == 20:
545 if len(key) == 20:
539 key = hex(key)
546 key = hex(key)
540 except:
547 except:
541 pass
548 pass
542 raise error.RepoLookupError(_("unknown revision '%s'") % key)
549 raise error.RepoLookupError(_("unknown revision '%s'") % key)
543
550
544 def lookupbranch(self, key, remote=None):
551 def lookupbranch(self, key, remote=None):
545 repo = remote or self
552 repo = remote or self
546 if key in repo.branchmap():
553 if key in repo.branchmap():
547 return key
554 return key
548
555
549 repo = (remote and remote.local()) and remote or self
556 repo = (remote and remote.local()) and remote or self
550 return repo[key].branch()
557 return repo[key].branch()
551
558
552 def local(self):
559 def local(self):
553 return True
560 return True
554
561
555 def join(self, f):
562 def join(self, f):
556 return os.path.join(self.path, f)
563 return os.path.join(self.path, f)
557
564
558 def wjoin(self, f):
565 def wjoin(self, f):
559 return os.path.join(self.root, f)
566 return os.path.join(self.root, f)
560
567
561 def file(self, f):
568 def file(self, f):
562 if f[0] == '/':
569 if f[0] == '/':
563 f = f[1:]
570 f = f[1:]
564 return filelog.filelog(self.sopener, f)
571 return filelog.filelog(self.sopener, f)
565
572
566 def changectx(self, changeid):
573 def changectx(self, changeid):
567 return self[changeid]
574 return self[changeid]
568
575
569 def parents(self, changeid=None):
576 def parents(self, changeid=None):
570 '''get list of changectxs for parents of changeid'''
577 '''get list of changectxs for parents of changeid'''
571 return self[changeid].parents()
578 return self[changeid].parents()
572
579
573 def filectx(self, path, changeid=None, fileid=None):
580 def filectx(self, path, changeid=None, fileid=None):
574 """changeid can be a changeset revision, node, or tag.
581 """changeid can be a changeset revision, node, or tag.
575 fileid can be a file revision or node."""
582 fileid can be a file revision or node."""
576 return context.filectx(self, path, changeid, fileid)
583 return context.filectx(self, path, changeid, fileid)
577
584
578 def getcwd(self):
585 def getcwd(self):
579 return self.dirstate.getcwd()
586 return self.dirstate.getcwd()
580
587
581 def pathto(self, f, cwd=None):
588 def pathto(self, f, cwd=None):
582 return self.dirstate.pathto(f, cwd)
589 return self.dirstate.pathto(f, cwd)
583
590
584 def wfile(self, f, mode='r'):
591 def wfile(self, f, mode='r'):
585 return self.wopener(f, mode)
592 return self.wopener(f, mode)
586
593
587 def _link(self, f):
594 def _link(self, f):
588 return os.path.islink(self.wjoin(f))
595 return os.path.islink(self.wjoin(f))
589
596
590 def _loadfilter(self, filter):
597 def _loadfilter(self, filter):
591 if filter not in self.filterpats:
598 if filter not in self.filterpats:
592 l = []
599 l = []
593 for pat, cmd in self.ui.configitems(filter):
600 for pat, cmd in self.ui.configitems(filter):
594 if cmd == '!':
601 if cmd == '!':
595 continue
602 continue
596 mf = matchmod.match(self.root, '', [pat])
603 mf = matchmod.match(self.root, '', [pat])
597 fn = None
604 fn = None
598 params = cmd
605 params = cmd
599 for name, filterfn in self._datafilters.iteritems():
606 for name, filterfn in self._datafilters.iteritems():
600 if cmd.startswith(name):
607 if cmd.startswith(name):
601 fn = filterfn
608 fn = filterfn
602 params = cmd[len(name):].lstrip()
609 params = cmd[len(name):].lstrip()
603 break
610 break
604 if not fn:
611 if not fn:
605 fn = lambda s, c, **kwargs: util.filter(s, c)
612 fn = lambda s, c, **kwargs: util.filter(s, c)
606 # Wrap old filters not supporting keyword arguments
613 # Wrap old filters not supporting keyword arguments
607 if not inspect.getargspec(fn)[2]:
614 if not inspect.getargspec(fn)[2]:
608 oldfn = fn
615 oldfn = fn
609 fn = lambda s, c, **kwargs: oldfn(s, c)
616 fn = lambda s, c, **kwargs: oldfn(s, c)
610 l.append((mf, fn, params))
617 l.append((mf, fn, params))
611 self.filterpats[filter] = l
618 self.filterpats[filter] = l
612 return self.filterpats[filter]
619 return self.filterpats[filter]
613
620
614 def _filter(self, filterpats, filename, data):
621 def _filter(self, filterpats, filename, data):
615 for mf, fn, cmd in filterpats:
622 for mf, fn, cmd in filterpats:
616 if mf(filename):
623 if mf(filename):
617 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
624 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
618 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
625 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
619 break
626 break
620
627
621 return data
628 return data
622
629
623 @propertycache
630 @propertycache
624 def _encodefilterpats(self):
631 def _encodefilterpats(self):
625 return self._loadfilter('encode')
632 return self._loadfilter('encode')
626
633
627 @propertycache
634 @propertycache
628 def _decodefilterpats(self):
635 def _decodefilterpats(self):
629 return self._loadfilter('decode')
636 return self._loadfilter('decode')
630
637
631 def adddatafilter(self, name, filter):
638 def adddatafilter(self, name, filter):
632 self._datafilters[name] = filter
639 self._datafilters[name] = filter
633
640
634 def wread(self, filename):
641 def wread(self, filename):
635 if self._link(filename):
642 if self._link(filename):
636 data = os.readlink(self.wjoin(filename))
643 data = os.readlink(self.wjoin(filename))
637 else:
644 else:
638 data = self.wopener(filename, 'r').read()
645 data = self.wopener(filename, 'r').read()
639 return self._filter(self._encodefilterpats, filename, data)
646 return self._filter(self._encodefilterpats, filename, data)
640
647
641 def wwrite(self, filename, data, flags):
648 def wwrite(self, filename, data, flags):
642 data = self._filter(self._decodefilterpats, filename, data)
649 data = self._filter(self._decodefilterpats, filename, data)
643 if 'l' in flags:
650 if 'l' in flags:
644 self.wopener.symlink(data, filename)
651 self.wopener.symlink(data, filename)
645 else:
652 else:
646 self.wopener(filename, 'w').write(data)
653 self.wopener(filename, 'w').write(data)
647 if 'x' in flags:
654 if 'x' in flags:
648 util.set_flags(self.wjoin(filename), False, True)
655 util.set_flags(self.wjoin(filename), False, True)
649
656
650 def wwritedata(self, filename, data):
657 def wwritedata(self, filename, data):
651 return self._filter(self._decodefilterpats, filename, data)
658 return self._filter(self._decodefilterpats, filename, data)
652
659
653 def transaction(self, desc):
660 def transaction(self, desc):
654 tr = self._transref and self._transref() or None
661 tr = self._transref and self._transref() or None
655 if tr and tr.running():
662 if tr and tr.running():
656 return tr.nest()
663 return tr.nest()
657
664
658 # abort here if the journal already exists
665 # abort here if the journal already exists
659 if os.path.exists(self.sjoin("journal")):
666 if os.path.exists(self.sjoin("journal")):
660 raise error.RepoError(
667 raise error.RepoError(
661 _("abandoned transaction found - run hg recover"))
668 _("abandoned transaction found - run hg recover"))
662
669
663 # save dirstate for rollback
670 # save dirstate for rollback
664 try:
671 try:
665 ds = self.opener("dirstate").read()
672 ds = self.opener("dirstate").read()
666 except IOError:
673 except IOError:
667 ds = ""
674 ds = ""
668 self.opener("journal.dirstate", "w").write(ds)
675 self.opener("journal.dirstate", "w").write(ds)
669 self.opener("journal.branch", "w").write(
676 self.opener("journal.branch", "w").write(
670 encoding.fromlocal(self.dirstate.branch()))
677 encoding.fromlocal(self.dirstate.branch()))
671 self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))
678 self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))
672
679
673 renames = [(self.sjoin("journal"), self.sjoin("undo")),
680 renames = [(self.sjoin("journal"), self.sjoin("undo")),
674 (self.join("journal.dirstate"), self.join("undo.dirstate")),
681 (self.join("journal.dirstate"), self.join("undo.dirstate")),
675 (self.join("journal.branch"), self.join("undo.branch")),
682 (self.join("journal.branch"), self.join("undo.branch")),
676 (self.join("journal.desc"), self.join("undo.desc"))]
683 (self.join("journal.desc"), self.join("undo.desc"))]
677 tr = transaction.transaction(self.ui.warn, self.sopener,
684 tr = transaction.transaction(self.ui.warn, self.sopener,
678 self.sjoin("journal"),
685 self.sjoin("journal"),
679 aftertrans(renames),
686 aftertrans(renames),
680 self.store.createmode)
687 self.store.createmode)
681 self._transref = weakref.ref(tr)
688 self._transref = weakref.ref(tr)
682 return tr
689 return tr
683
690
684 def recover(self):
691 def recover(self):
685 lock = self.lock()
692 lock = self.lock()
686 try:
693 try:
687 if os.path.exists(self.sjoin("journal")):
694 if os.path.exists(self.sjoin("journal")):
688 self.ui.status(_("rolling back interrupted transaction\n"))
695 self.ui.status(_("rolling back interrupted transaction\n"))
689 transaction.rollback(self.sopener, self.sjoin("journal"),
696 transaction.rollback(self.sopener, self.sjoin("journal"),
690 self.ui.warn)
697 self.ui.warn)
691 self.invalidate()
698 self.invalidate()
692 return True
699 return True
693 else:
700 else:
694 self.ui.warn(_("no interrupted transaction available\n"))
701 self.ui.warn(_("no interrupted transaction available\n"))
695 return False
702 return False
696 finally:
703 finally:
697 lock.release()
704 lock.release()
698
705
699 def rollback(self, dryrun=False):
706 def rollback(self, dryrun=False):
700 wlock = lock = None
707 wlock = lock = None
701 try:
708 try:
702 wlock = self.wlock()
709 wlock = self.wlock()
703 lock = self.lock()
710 lock = self.lock()
704 if os.path.exists(self.sjoin("undo")):
711 if os.path.exists(self.sjoin("undo")):
705 try:
712 try:
706 args = self.opener("undo.desc", "r").read().splitlines()
713 args = self.opener("undo.desc", "r").read().splitlines()
707 if len(args) >= 3 and self.ui.verbose:
714 if len(args) >= 3 and self.ui.verbose:
708 desc = _("rolling back to revision %s"
715 desc = _("rolling back to revision %s"
709 " (undo %s: %s)\n") % (
716 " (undo %s: %s)\n") % (
710 int(args[0]) - 1, args[1], args[2])
717 int(args[0]) - 1, args[1], args[2])
711 elif len(args) >= 2:
718 elif len(args) >= 2:
712 desc = _("rolling back to revision %s (undo %s)\n") % (
719 desc = _("rolling back to revision %s (undo %s)\n") % (
713 int(args[0]) - 1, args[1])
720 int(args[0]) - 1, args[1])
714 except IOError:
721 except IOError:
715 desc = _("rolling back unknown transaction\n")
722 desc = _("rolling back unknown transaction\n")
716 self.ui.status(desc)
723 self.ui.status(desc)
717 if dryrun:
724 if dryrun:
718 return
725 return
719 transaction.rollback(self.sopener, self.sjoin("undo"),
726 transaction.rollback(self.sopener, self.sjoin("undo"),
720 self.ui.warn)
727 self.ui.warn)
721 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
728 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
722 if os.path.exists(self.join('undo.bookmarks')):
729 if os.path.exists(self.join('undo.bookmarks')):
723 util.rename(self.join('undo.bookmarks'),
730 util.rename(self.join('undo.bookmarks'),
724 self.join('bookmarks'))
731 self.join('bookmarks'))
725 try:
732 try:
726 branch = self.opener("undo.branch").read()
733 branch = self.opener("undo.branch").read()
727 self.dirstate.setbranch(branch)
734 self.dirstate.setbranch(branch)
728 except IOError:
735 except IOError:
729 self.ui.warn(_("Named branch could not be reset, "
736 self.ui.warn(_("Named branch could not be reset, "
730 "current branch still is: %s\n")
737 "current branch still is: %s\n")
731 % self.dirstate.branch())
738 % self.dirstate.branch())
732 self.invalidate()
739 self.invalidate()
733 self.dirstate.invalidate()
740 self.dirstate.invalidate()
734 self.destroyed()
741 self.destroyed()
735 else:
742 else:
736 self.ui.warn(_("no rollback information available\n"))
743 self.ui.warn(_("no rollback information available\n"))
737 return 1
744 return 1
738 finally:
745 finally:
739 release(lock, wlock)
746 release(lock, wlock)
740
747
741 def invalidatecaches(self):
748 def invalidatecaches(self):
742 self._tags = None
749 self._tags = None
743 self._tagtypes = None
750 self._tagtypes = None
744 self.nodetagscache = None
751 self.nodetagscache = None
745 self._branchcache = None # in UTF-8
752 self._branchcache = None # in UTF-8
746 self._branchcachetip = None
753 self._branchcachetip = None
747
754
748 def invalidate(self):
755 def invalidate(self):
749 for a in ("changelog", "manifest", "_bookmarks", "_bookmarkscurrent"):
756 for a in ("changelog", "manifest", "_bookmarks", "_bookmarkscurrent"):
750 if a in self.__dict__:
757 if a in self.__dict__:
751 delattr(self, a)
758 delattr(self, a)
752 self.invalidatecaches()
759 self.invalidatecaches()
753
760
754 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
761 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
755 try:
762 try:
756 l = lock.lock(lockname, 0, releasefn, desc=desc)
763 l = lock.lock(lockname, 0, releasefn, desc=desc)
757 except error.LockHeld, inst:
764 except error.LockHeld, inst:
758 if not wait:
765 if not wait:
759 raise
766 raise
760 self.ui.warn(_("waiting for lock on %s held by %r\n") %
767 self.ui.warn(_("waiting for lock on %s held by %r\n") %
761 (desc, inst.locker))
768 (desc, inst.locker))
762 # default to 600 seconds timeout
769 # default to 600 seconds timeout
763 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
770 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
764 releasefn, desc=desc)
771 releasefn, desc=desc)
765 if acquirefn:
772 if acquirefn:
766 acquirefn()
773 acquirefn()
767 return l
774 return l
768
775
769 def lock(self, wait=True):
776 def lock(self, wait=True):
770 '''Lock the repository store (.hg/store) and return a weak reference
777 '''Lock the repository store (.hg/store) and return a weak reference
771 to the lock. Use this before modifying the store (e.g. committing or
778 to the lock. Use this before modifying the store (e.g. committing or
772 stripping). If you are opening a transaction, get a lock as well.)'''
779 stripping). If you are opening a transaction, get a lock as well.)'''
773 l = self._lockref and self._lockref()
780 l = self._lockref and self._lockref()
774 if l is not None and l.held:
781 if l is not None and l.held:
775 l.lock()
782 l.lock()
776 return l
783 return l
777
784
778 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
785 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
779 _('repository %s') % self.origroot)
786 _('repository %s') % self.origroot)
780 self._lockref = weakref.ref(l)
787 self._lockref = weakref.ref(l)
781 return l
788 return l
782
789
783 def wlock(self, wait=True):
790 def wlock(self, wait=True):
784 '''Lock the non-store parts of the repository (everything under
791 '''Lock the non-store parts of the repository (everything under
785 .hg except .hg/store) and return a weak reference to the lock.
792 .hg except .hg/store) and return a weak reference to the lock.
786 Use this before modifying files in .hg.'''
793 Use this before modifying files in .hg.'''
787 l = self._wlockref and self._wlockref()
794 l = self._wlockref and self._wlockref()
788 if l is not None and l.held:
795 if l is not None and l.held:
789 l.lock()
796 l.lock()
790 return l
797 return l
791
798
792 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
799 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
793 self.dirstate.invalidate, _('working directory of %s') %
800 self.dirstate.invalidate, _('working directory of %s') %
794 self.origroot)
801 self.origroot)
795 self._wlockref = weakref.ref(l)
802 self._wlockref = weakref.ref(l)
796 return l
803 return l
797
804
798 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
805 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
799 """
806 """
800 commit an individual file as part of a larger transaction
807 commit an individual file as part of a larger transaction
801 """
808 """
802
809
803 fname = fctx.path()
810 fname = fctx.path()
804 text = fctx.data()
811 text = fctx.data()
805 flog = self.file(fname)
812 flog = self.file(fname)
806 fparent1 = manifest1.get(fname, nullid)
813 fparent1 = manifest1.get(fname, nullid)
807 fparent2 = fparent2o = manifest2.get(fname, nullid)
814 fparent2 = fparent2o = manifest2.get(fname, nullid)
808
815
809 meta = {}
816 meta = {}
810 copy = fctx.renamed()
817 copy = fctx.renamed()
811 if copy and copy[0] != fname:
818 if copy and copy[0] != fname:
812 # Mark the new revision of this file as a copy of another
819 # Mark the new revision of this file as a copy of another
813 # file. This copy data will effectively act as a parent
820 # file. This copy data will effectively act as a parent
814 # of this new revision. If this is a merge, the first
821 # of this new revision. If this is a merge, the first
815 # parent will be the nullid (meaning "look up the copy data")
822 # parent will be the nullid (meaning "look up the copy data")
816 # and the second one will be the other parent. For example:
823 # and the second one will be the other parent. For example:
817 #
824 #
818 # 0 --- 1 --- 3 rev1 changes file foo
825 # 0 --- 1 --- 3 rev1 changes file foo
819 # \ / rev2 renames foo to bar and changes it
826 # \ / rev2 renames foo to bar and changes it
820 # \- 2 -/ rev3 should have bar with all changes and
827 # \- 2 -/ rev3 should have bar with all changes and
821 # should record that bar descends from
828 # should record that bar descends from
822 # bar in rev2 and foo in rev1
829 # bar in rev2 and foo in rev1
823 #
830 #
824 # this allows this merge to succeed:
831 # this allows this merge to succeed:
825 #
832 #
826 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
833 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
827 # \ / merging rev3 and rev4 should use bar@rev2
834 # \ / merging rev3 and rev4 should use bar@rev2
828 # \- 2 --- 4 as the merge base
835 # \- 2 --- 4 as the merge base
829 #
836 #
830
837
831 cfname = copy[0]
838 cfname = copy[0]
832 crev = manifest1.get(cfname)
839 crev = manifest1.get(cfname)
833 newfparent = fparent2
840 newfparent = fparent2
834
841
835 if manifest2: # branch merge
842 if manifest2: # branch merge
836 if fparent2 == nullid or crev is None: # copied on remote side
843 if fparent2 == nullid or crev is None: # copied on remote side
837 if cfname in manifest2:
844 if cfname in manifest2:
838 crev = manifest2[cfname]
845 crev = manifest2[cfname]
839 newfparent = fparent1
846 newfparent = fparent1
840
847
841 # find source in nearest ancestor if we've lost track
848 # find source in nearest ancestor if we've lost track
842 if not crev:
849 if not crev:
843 self.ui.debug(" %s: searching for copy revision for %s\n" %
850 self.ui.debug(" %s: searching for copy revision for %s\n" %
844 (fname, cfname))
851 (fname, cfname))
845 for ancestor in self[None].ancestors():
852 for ancestor in self[None].ancestors():
846 if cfname in ancestor:
853 if cfname in ancestor:
847 crev = ancestor[cfname].filenode()
854 crev = ancestor[cfname].filenode()
848 break
855 break
849
856
850 if crev:
857 if crev:
851 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
858 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
852 meta["copy"] = cfname
859 meta["copy"] = cfname
853 meta["copyrev"] = hex(crev)
860 meta["copyrev"] = hex(crev)
854 fparent1, fparent2 = nullid, newfparent
861 fparent1, fparent2 = nullid, newfparent
855 else:
862 else:
856 self.ui.warn(_("warning: can't find ancestor for '%s' "
863 self.ui.warn(_("warning: can't find ancestor for '%s' "
857 "copied from '%s'!\n") % (fname, cfname))
864 "copied from '%s'!\n") % (fname, cfname))
858
865
859 elif fparent2 != nullid:
866 elif fparent2 != nullid:
860 # is one parent an ancestor of the other?
867 # is one parent an ancestor of the other?
861 fparentancestor = flog.ancestor(fparent1, fparent2)
868 fparentancestor = flog.ancestor(fparent1, fparent2)
862 if fparentancestor == fparent1:
869 if fparentancestor == fparent1:
863 fparent1, fparent2 = fparent2, nullid
870 fparent1, fparent2 = fparent2, nullid
864 elif fparentancestor == fparent2:
871 elif fparentancestor == fparent2:
865 fparent2 = nullid
872 fparent2 = nullid
866
873
867 # is the file changed?
874 # is the file changed?
868 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
875 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
869 changelist.append(fname)
876 changelist.append(fname)
870 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
877 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
871
878
872 # are just the flags changed during merge?
879 # are just the flags changed during merge?
873 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
880 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
874 changelist.append(fname)
881 changelist.append(fname)
875
882
876 return fparent1
883 return fparent1
877
884
878 def commit(self, text="", user=None, date=None, match=None, force=False,
885 def commit(self, text="", user=None, date=None, match=None, force=False,
879 editor=False, extra={}):
886 editor=False, extra={}):
880 """Add a new revision to current repository.
887 """Add a new revision to current repository.
881
888
882 Revision information is gathered from the working directory,
889 Revision information is gathered from the working directory,
883 match can be used to filter the committed files. If editor is
890 match can be used to filter the committed files. If editor is
884 supplied, it is called to get a commit message.
891 supplied, it is called to get a commit message.
885 """
892 """
886
893
887 def fail(f, msg):
894 def fail(f, msg):
888 raise util.Abort('%s: %s' % (f, msg))
895 raise util.Abort('%s: %s' % (f, msg))
889
896
890 if not match:
897 if not match:
891 match = matchmod.always(self.root, '')
898 match = matchmod.always(self.root, '')
892
899
893 if not force:
900 if not force:
894 vdirs = []
901 vdirs = []
895 match.dir = vdirs.append
902 match.dir = vdirs.append
896 match.bad = fail
903 match.bad = fail
897
904
898 wlock = self.wlock()
905 wlock = self.wlock()
899 try:
906 try:
900 wctx = self[None]
907 wctx = self[None]
901 merge = len(wctx.parents()) > 1
908 merge = len(wctx.parents()) > 1
902
909
903 if (not force and merge and match and
910 if (not force and merge and match and
904 (match.files() or match.anypats())):
911 (match.files() or match.anypats())):
905 raise util.Abort(_('cannot partially commit a merge '
912 raise util.Abort(_('cannot partially commit a merge '
906 '(do not specify files or patterns)'))
913 '(do not specify files or patterns)'))
907
914
908 changes = self.status(match=match, clean=force)
915 changes = self.status(match=match, clean=force)
909 if force:
916 if force:
910 changes[0].extend(changes[6]) # mq may commit unchanged files
917 changes[0].extend(changes[6]) # mq may commit unchanged files
911
918
912 # check subrepos
919 # check subrepos
913 subs = []
920 subs = []
914 removedsubs = set()
921 removedsubs = set()
915 for p in wctx.parents():
922 for p in wctx.parents():
916 removedsubs.update(s for s in p.substate if match(s))
923 removedsubs.update(s for s in p.substate if match(s))
917 for s in wctx.substate:
924 for s in wctx.substate:
918 removedsubs.discard(s)
925 removedsubs.discard(s)
919 if match(s) and wctx.sub(s).dirty():
926 if match(s) and wctx.sub(s).dirty():
920 subs.append(s)
927 subs.append(s)
921 if (subs or removedsubs):
928 if (subs or removedsubs):
922 if (not match('.hgsub') and
929 if (not match('.hgsub') and
923 '.hgsub' in (wctx.modified() + wctx.added())):
930 '.hgsub' in (wctx.modified() + wctx.added())):
924 raise util.Abort(_("can't commit subrepos without .hgsub"))
931 raise util.Abort(_("can't commit subrepos without .hgsub"))
925 if '.hgsubstate' not in changes[0]:
932 if '.hgsubstate' not in changes[0]:
926 changes[0].insert(0, '.hgsubstate')
933 changes[0].insert(0, '.hgsubstate')
927
934
928 # make sure all explicit patterns are matched
935 # make sure all explicit patterns are matched
929 if not force and match.files():
936 if not force and match.files():
930 matched = set(changes[0] + changes[1] + changes[2])
937 matched = set(changes[0] + changes[1] + changes[2])
931
938
932 for f in match.files():
939 for f in match.files():
933 if f == '.' or f in matched or f in wctx.substate:
940 if f == '.' or f in matched or f in wctx.substate:
934 continue
941 continue
935 if f in changes[3]: # missing
942 if f in changes[3]: # missing
936 fail(f, _('file not found!'))
943 fail(f, _('file not found!'))
937 if f in vdirs: # visited directory
944 if f in vdirs: # visited directory
938 d = f + '/'
945 d = f + '/'
939 for mf in matched:
946 for mf in matched:
940 if mf.startswith(d):
947 if mf.startswith(d):
941 break
948 break
942 else:
949 else:
943 fail(f, _("no match under directory!"))
950 fail(f, _("no match under directory!"))
944 elif f not in self.dirstate:
951 elif f not in self.dirstate:
945 fail(f, _("file not tracked!"))
952 fail(f, _("file not tracked!"))
946
953
947 if (not force and not extra.get("close") and not merge
954 if (not force and not extra.get("close") and not merge
948 and not (changes[0] or changes[1] or changes[2])
955 and not (changes[0] or changes[1] or changes[2])
949 and wctx.branch() == wctx.p1().branch()):
956 and wctx.branch() == wctx.p1().branch()):
950 return None
957 return None
951
958
952 ms = mergemod.mergestate(self)
959 ms = mergemod.mergestate(self)
953 for f in changes[0]:
960 for f in changes[0]:
954 if f in ms and ms[f] == 'u':
961 if f in ms and ms[f] == 'u':
955 raise util.Abort(_("unresolved merge conflicts "
962 raise util.Abort(_("unresolved merge conflicts "
956 "(see hg resolve)"))
963 "(see hg resolve)"))
957
964
958 cctx = context.workingctx(self, text, user, date, extra, changes)
965 cctx = context.workingctx(self, text, user, date, extra, changes)
959 if editor:
966 if editor:
960 cctx._text = editor(self, cctx, subs)
967 cctx._text = editor(self, cctx, subs)
961 edited = (text != cctx._text)
968 edited = (text != cctx._text)
962
969
963 # commit subs
970 # commit subs
964 if subs or removedsubs:
971 if subs or removedsubs:
965 state = wctx.substate.copy()
972 state = wctx.substate.copy()
966 for s in sorted(subs):
973 for s in sorted(subs):
967 sub = wctx.sub(s)
974 sub = wctx.sub(s)
968 self.ui.status(_('committing subrepository %s\n') %
975 self.ui.status(_('committing subrepository %s\n') %
969 subrepo.subrelpath(sub))
976 subrepo.subrelpath(sub))
970 sr = sub.commit(cctx._text, user, date)
977 sr = sub.commit(cctx._text, user, date)
971 state[s] = (state[s][0], sr)
978 state[s] = (state[s][0], sr)
972 subrepo.writestate(self, state)
979 subrepo.writestate(self, state)
973
980
974 # Save commit message in case this transaction gets rolled back
981 # Save commit message in case this transaction gets rolled back
975 # (e.g. by a pretxncommit hook). Leave the content alone on
982 # (e.g. by a pretxncommit hook). Leave the content alone on
976 # the assumption that the user will use the same editor again.
983 # the assumption that the user will use the same editor again.
977 msgfile = self.opener('last-message.txt', 'wb')
984 msgfile = self.opener('last-message.txt', 'wb')
978 msgfile.write(cctx._text)
985 msgfile.write(cctx._text)
979 msgfile.close()
986 msgfile.close()
980
987
981 p1, p2 = self.dirstate.parents()
988 p1, p2 = self.dirstate.parents()
982 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
989 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
983 try:
990 try:
984 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
991 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
985 ret = self.commitctx(cctx, True)
992 ret = self.commitctx(cctx, True)
986 except:
993 except:
987 if edited:
994 if edited:
988 msgfn = self.pathto(msgfile.name[len(self.root)+1:])
995 msgfn = self.pathto(msgfile.name[len(self.root)+1:])
989 self.ui.write(
996 self.ui.write(
990 _('note: commit message saved in %s\n') % msgfn)
997 _('note: commit message saved in %s\n') % msgfn)
991 raise
998 raise
992
999
993 # update bookmarks, dirstate and mergestate
1000 # update bookmarks, dirstate and mergestate
994 parents = (p1, p2)
1001 parents = (p1, p2)
995 if p2 == nullid:
1002 if p2 == nullid:
996 parents = (p1,)
1003 parents = (p1,)
997 bookmarks.update(self, parents, ret)
1004 bookmarks.update(self, parents, ret)
998 for f in changes[0] + changes[1]:
1005 for f in changes[0] + changes[1]:
999 self.dirstate.normal(f)
1006 self.dirstate.normal(f)
1000 for f in changes[2]:
1007 for f in changes[2]:
1001 self.dirstate.forget(f)
1008 self.dirstate.forget(f)
1002 self.dirstate.setparents(ret)
1009 self.dirstate.setparents(ret)
1003 ms.reset()
1010 ms.reset()
1004 finally:
1011 finally:
1005 wlock.release()
1012 wlock.release()
1006
1013
1007 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
1014 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
1008 return ret
1015 return ret
1009
1016
1010 def commitctx(self, ctx, error=False):
1017 def commitctx(self, ctx, error=False):
1011 """Add a new revision to current repository.
1018 """Add a new revision to current repository.
1012 Revision information is passed via the context argument.
1019 Revision information is passed via the context argument.
1013 """
1020 """
1014
1021
1015 tr = lock = None
1022 tr = lock = None
1016 removed = list(ctx.removed())
1023 removed = list(ctx.removed())
1017 p1, p2 = ctx.p1(), ctx.p2()
1024 p1, p2 = ctx.p1(), ctx.p2()
1018 m1 = p1.manifest().copy()
1025 m1 = p1.manifest().copy()
1019 m2 = p2.manifest()
1026 m2 = p2.manifest()
1020 user = ctx.user()
1027 user = ctx.user()
1021
1028
1022 lock = self.lock()
1029 lock = self.lock()
1023 try:
1030 try:
1024 tr = self.transaction("commit")
1031 tr = self.transaction("commit")
1025 trp = weakref.proxy(tr)
1032 trp = weakref.proxy(tr)
1026
1033
1027 # check in files
1034 # check in files
1028 new = {}
1035 new = {}
1029 changed = []
1036 changed = []
1030 linkrev = len(self)
1037 linkrev = len(self)
1031 for f in sorted(ctx.modified() + ctx.added()):
1038 for f in sorted(ctx.modified() + ctx.added()):
1032 self.ui.note(f + "\n")
1039 self.ui.note(f + "\n")
1033 try:
1040 try:
1034 fctx = ctx[f]
1041 fctx = ctx[f]
1035 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1042 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1036 changed)
1043 changed)
1037 m1.set(f, fctx.flags())
1044 m1.set(f, fctx.flags())
1038 except OSError, inst:
1045 except OSError, inst:
1039 self.ui.warn(_("trouble committing %s!\n") % f)
1046 self.ui.warn(_("trouble committing %s!\n") % f)
1040 raise
1047 raise
1041 except IOError, inst:
1048 except IOError, inst:
1042 errcode = getattr(inst, 'errno', errno.ENOENT)
1049 errcode = getattr(inst, 'errno', errno.ENOENT)
1043 if error or errcode and errcode != errno.ENOENT:
1050 if error or errcode and errcode != errno.ENOENT:
1044 self.ui.warn(_("trouble committing %s!\n") % f)
1051 self.ui.warn(_("trouble committing %s!\n") % f)
1045 raise
1052 raise
1046 else:
1053 else:
1047 removed.append(f)
1054 removed.append(f)
1048
1055
1049 # update manifest
1056 # update manifest
1050 m1.update(new)
1057 m1.update(new)
1051 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1058 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1052 drop = [f for f in removed if f in m1]
1059 drop = [f for f in removed if f in m1]
1053 for f in drop:
1060 for f in drop:
1054 del m1[f]
1061 del m1[f]
1055 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1062 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1056 p2.manifestnode(), (new, drop))
1063 p2.manifestnode(), (new, drop))
1057
1064
1058 # update changelog
1065 # update changelog
1059 self.changelog.delayupdate()
1066 self.changelog.delayupdate()
1060 n = self.changelog.add(mn, changed + removed, ctx.description(),
1067 n = self.changelog.add(mn, changed + removed, ctx.description(),
1061 trp, p1.node(), p2.node(),
1068 trp, p1.node(), p2.node(),
1062 user, ctx.date(), ctx.extra().copy())
1069 user, ctx.date(), ctx.extra().copy())
1063 p = lambda: self.changelog.writepending() and self.root or ""
1070 p = lambda: self.changelog.writepending() and self.root or ""
1064 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1071 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1065 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1072 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1066 parent2=xp2, pending=p)
1073 parent2=xp2, pending=p)
1067 self.changelog.finalize(trp)
1074 self.changelog.finalize(trp)
1068 tr.close()
1075 tr.close()
1069
1076
1070 if self._branchcache:
1077 if self._branchcache:
1071 self.updatebranchcache()
1078 self.updatebranchcache()
1072 return n
1079 return n
1073 finally:
1080 finally:
1074 if tr:
1081 if tr:
1075 tr.release()
1082 tr.release()
1076 lock.release()
1083 lock.release()
1077
1084
def destroyed(self):
    '''Inform the repository that nodes have been destroyed.
    Intended for use by strip and rollback, so there's a common
    place for anything that has to be done after destroying history.'''
    # XXX it might be nice if we could take the list of destroyed
    # nodes, but I don't see an easy way for rollback() to do that

    # Ensure the persistent tag cache is updated. Doing it now
    # means that the tag cache only has to worry about destroyed
    # heads immediately after a strip/rollback. That in turn
    # guarantees that "cachetip == currenttip" (comparing both rev
    # and node) always means no nodes have been added or destroyed.

    # XXX this is suboptimal when qrefresh'ing: we strip the current
    # head, refresh the tag cache, then immediately add a new head.
    # But I think doing it this way is necessary for the "instant
    # tag cache retrieval" case to work.
    self.invalidatecaches()
1096
1103
def walk(self, match, node=None):
    '''
    walk recursively through the directory tree or a given
    changeset, finding all files matched by the match
    function

    node=None (the default) walks the working directory context.
    '''
    # delegate to the context object selected by node
    return self[node].walk(match)
1104
1111
def status(self, node1='.', node2=None, match=None,
           ignored=False, clean=False, unknown=False,
           listsubrepos=False):
    """return status of files between two nodes or node and working directory

    If node1 is None, use the first dirstate parent instead.
    If node2 is None, compare node1 with working directory.

    Returns a 7-tuple of sorted lists:
    (modified, added, removed, deleted, unknown, ignored, clean)
    """

    def mfmatches(ctx):
        # copy of ctx's manifest restricted to files accepted by match
        mf = ctx.manifest().copy()
        for fn in mf.keys():
            if not match(fn):
                del mf[fn]
        return mf

    if isinstance(node1, context.changectx):
        ctx1 = node1
    else:
        ctx1 = self[node1]
    if isinstance(node2, context.changectx):
        ctx2 = node2
    else:
        ctx2 = self[node2]

    working = ctx2.rev() is None
    parentworking = working and ctx1 == self['.']
    match = match or matchmod.always(self.root, self.getcwd())
    listignored, listclean, listunknown = ignored, clean, unknown

    # load earliest manifest first for caching reasons
    if not working and ctx2.rev() < ctx1.rev():
        ctx2.manifest()

    if not parentworking:
        def bad(f, msg):
            # only complain about files missing from the older context
            if f not in ctx1:
                self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
        match.bad = bad

    if working: # we need to scan the working dir
        subrepos = []
        if '.hgsub' in self.dirstate:
            subrepos = ctx1.substate.keys()
        s = self.dirstate.status(match, subrepos, listignored,
                                 listclean, listunknown)
        cmp, modified, added, removed, deleted, unknown, ignored, clean = s

        # check for any possibly clean files
        if parentworking and cmp:
            fixup = []
            # do a full compare of any files that might have changed
            for f in sorted(cmp):
                if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                    or ctx1[f].cmp(ctx2[f])):
                    modified.append(f)
                else:
                    fixup.append(f)

            # update dirstate for files that are actually clean
            if fixup:
                if listclean:
                    clean += fixup

                try:
                    # updating the dirstate is optional
                    # so we don't wait on the lock
                    wlock = self.wlock(False)
                    try:
                        for f in fixup:
                            self.dirstate.normal(f)
                    finally:
                        wlock.release()
                except error.LockError:
                    pass

    if not parentworking:
        mf1 = mfmatches(ctx1)
        if working:
            # we are comparing working dir against non-parent
            # generate a pseudo-manifest for the working dir
            mf2 = mfmatches(self['.'])
            for f in cmp + modified + added:
                mf2[f] = None
                mf2.set(f, ctx2.flags(f))
            for f in removed:
                if f in mf2:
                    del mf2[f]
        else:
            # we are comparing two revisions
            deleted, unknown, ignored = [], [], []
            mf2 = mfmatches(ctx2)

        modified, added, clean = [], [], []
        for fn in mf2:
            if fn in mf1:
                # same name on both sides: modified iff flags differ or
                # content differs (mf2[fn] may be None for working dir,
                # which forces a real content compare)
                if (mf1.flags(fn) != mf2.flags(fn) or
                    (mf1[fn] != mf2[fn] and
                     (mf2[fn] or ctx1[fn].cmp(ctx2[fn])))):
                    modified.append(fn)
                elif listclean:
                    clean.append(fn)
                del mf1[fn]
            else:
                added.append(fn)
        # whatever is left in mf1 exists only on the old side
        removed = mf1.keys()

    r = modified, added, removed, deleted, unknown, ignored, clean

    if listsubrepos:
        for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
            if working:
                rev2 = None
            else:
                rev2 = ctx2.substate[subpath][1]
            try:
                submatch = matchmod.narrowmatcher(subpath, match)
                s = sub.status(rev2, match=submatch, ignored=listignored,
                               clean=listclean, unknown=listunknown,
                               listsubrepos=True)
                # merge subrepo results, prefixing with the subrepo path
                for rfiles, sfiles in zip(r, s):
                    rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
            except error.LookupError:
                self.ui.status(_("skipping missing subrepository: %s\n")
                               % subpath)

    # sort each result list in place
    for l in r:
        l.sort()
    return r
1233
1240
def heads(self, start=None):
    """Return repository heads, optionally limited to descendants of start.

    Heads come from the changelog and are returned sorted by revision
    number, newest first.
    """
    heads = self.changelog.heads(start)
    # sort the output in rev descending order
    return sorted(heads, key=self.changelog.rev, reverse=True)
1238
1245
def branchheads(self, branch=None, start=None, closed=False):
    '''return a (possibly filtered) list of heads for the given branch

    Heads are returned in topological order, from newest to oldest.
    If branch is None, use the dirstate branch.
    If start is not None, return only heads reachable from start.
    If closed is True, return heads that are marked as closed as well.
    '''
    if branch is None:
        branch = self[None].branch()
    branches = self.branchmap()
    if branch not in branches:
        return []
    # the cache returns heads ordered lowest to highest
    bheads = list(reversed(branches[branch]))
    if start is not None:
        # filter out the heads that cannot be reached from startrev
        fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
        bheads = [h for h in bheads if h in fbheads]
    if not closed:
        # drop heads whose extra dict (changelog entry field 5) carries
        # the 'close' marker
        bheads = [h for h in bheads if
                  ('close' not in self.changelog.read(h)[5])]
    return bheads
1262
1269
def branches(self, nodes):
    """For each node, walk first-parents back to a merge or root.

    Returns a list of (tip, node, p1, p2) tuples, one per input node
    (defaulting to the changelog tip when nodes is empty), where node
    is the first ancestor that is a merge or a root changeset.
    """
    if not nodes:
        nodes = [self.changelog.tip()]
    b = []
    for n in nodes:
        t = n
        while True:
            p = self.changelog.parents(n)
            # stop at a merge (second parent set) or a root (first
            # parent null)
            if p[1] != nullid or p[0] == nullid:
                b.append((t, n, p[0], p[1]))
                break
            n = p[0]
    return b
1276
1283
def between(self, pairs):
    """For each (top, bottom) pair, sample the first-parent chain.

    Walking from top towards bottom, records every node whose distance
    from top is a power of two minus one (positions 1, 2, 4, 8, ...).
    Returns one list per pair; used by the wire protocol to narrow
    down common ancestors.
    """
    r = []

    for top, bottom in pairs:
        n, l, i = top, [], 0
        f = 1

        while n != bottom and n != nullid:
            p = self.changelog.parents(n)[0]
            if i == f:
                # record this node and double the next sample distance
                l.append(n)
                f = f * 2
            n = p
            i += 1

        r.append(l)

    return r
1295
1302
def pull(self, remote, heads=None, force=False):
    """Pull changes from remote, then update local bookmarks.

    Returns the result of addchangegroup() (0 when nothing to fetch).
    Bookmarks shared with the remote are fast-forwarded when the
    remote's target descends from ours; divergent bookmarks are left
    alone with a warning.
    """
    lock = self.lock()
    try:
        tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                           force=force)
        common, fetch, rheads = tmp
        if not fetch:
            self.ui.status(_("no changes found\n"))
            result = 0
        else:
            if heads is None and fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))
            elif heads is None and remote.capable('changegroupsubset'):
                # issue1320, avoid a race if remote changed after discovery
                heads = rheads

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            elif not remote.capable('changegroupsubset'):
                raise util.Abort(_("partial pull cannot be done because "
                                   "other repository doesn't support "
                                   "changegroupsubset."))
            else:
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            result = self.addchangegroup(cg, 'pull', remote.url(),
                                         lock=lock)
    finally:
        lock.release()

    # fast-forward any local bookmarks that the remote has moved ahead
    self.ui.debug("checking for updated bookmarks\n")
    rb = remote.listkeys('bookmarks')
    changed = False
    for k in rb.keys():
        if k in self._bookmarks:
            nr, nl = rb[k], self._bookmarks[k]
            if nr in self:
                cr = self[nr]
                cl = self[nl]
                if cl.rev() >= cr.rev():
                    # local bookmark already at or past remote's target
                    continue
                if cr in cl.descendants():
                    self._bookmarks[k] = cr.node()
                    changed = True
                    self.ui.status(_("updating bookmark %s\n") % k)
                else:
                    self.ui.warn(_("not updating divergent"
                                   " bookmark %s\n") % k)
    if changed:
        bookmarks.write(self)

    return result
1347
1354
def checkpush(self, force, revs):
    """Extensions can override this function if additional checks have
    to be performed before pushing, or call it if they override push
    command.
    """
    # intentionally a no-op hook point; overriders raise to veto a push
    pass
1354
1361
def push(self, remote, force=False, revs=None, newbranch=False):
    '''Push outgoing changesets (limited by revs) from the current
    repository to remote. Return an integer:
      - 0 means HTTP error *or* nothing to push
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    # there are two ways to push to remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).

    self.checkpush(force, revs)
    lock = None
    unbundle = remote.capable('unbundle')
    if not unbundle:
        lock = remote.lock()
    try:
        cg, remote_heads = discovery.prepush(self, remote, force, revs,
                                             newbranch)
        ret = remote_heads
        if cg is not None:
            if unbundle:
                # local repo finds heads on server, finds out what
                # revs it must push. once revs transferred, if server
                # finds it has different heads (someone else won
                # commit/push race), server aborts.
                if force:
                    remote_heads = ['force']
                # ssh: return remote's addchangegroup()
                # http: return remote's addchangegroup() or 0 for error
                ret = remote.unbundle(cg, remote_heads, 'push')
            else:
                # we return an integer indicating remote head count change
                ret = remote.addchangegroup(cg, 'push', self.url(),
                                            lock=lock)
    finally:
        if lock is not None:
            lock.release()

    # advance remote bookmarks that our local bookmark now descends from
    self.ui.debug("checking for updated bookmarks\n")
    rb = remote.listkeys('bookmarks')
    for k in rb.keys():
        if k in self._bookmarks:
            nr, nl = rb[k], hex(self._bookmarks[k])
            if nr in self:
                cr = self[nr]
                cl = self[nl]
                if cl in cr.descendants():
                    r = remote.pushkey('bookmarks', k, nr, nl)
                    if r:
                        self.ui.status(_("updating bookmark %s\n") % k)
                    else:
                        self.ui.warn(_('updating bookmark %s'
                                       ' failed!\n') % k)

    return ret
1416
1423
def changegroupinfo(self, nodes, source):
    """Report the size (and with --debug, the members) of a changegroup.

    The count is printed when the ui is verbose or when building a
    bundle; the per-node listing only appears in debug mode.
    """
    if self.ui.verbose or source == 'bundle':
        self.ui.status(_("%d changesets found\n") % len(nodes))
    if self.ui.debugflag:
        self.ui.debug("list of changesets:\n")
        for node in nodes:
            self.ui.debug("%s\n" % hex(node))
1424
1431
1425 def changegroupsubset(self, bases, heads, source, extranodes=None):
1432 def changegroupsubset(self, bases, heads, source, extranodes=None):
1426 """Compute a changegroup consisting of all the nodes that are
1433 """Compute a changegroup consisting of all the nodes that are
1427 descendents of any of the bases and ancestors of any of the heads.
1434 descendents of any of the bases and ancestors of any of the heads.
1428 Return a chunkbuffer object whose read() method will return
1435 Return a chunkbuffer object whose read() method will return
1429 successive changegroup chunks.
1436 successive changegroup chunks.
1430
1437
1431 It is fairly complex as determining which filenodes and which
1438 It is fairly complex as determining which filenodes and which
1432 manifest nodes need to be included for the changeset to be complete
1439 manifest nodes need to be included for the changeset to be complete
1433 is non-trivial.
1440 is non-trivial.
1434
1441
1435 Another wrinkle is doing the reverse, figuring out which changeset in
1442 Another wrinkle is doing the reverse, figuring out which changeset in
1436 the changegroup a particular filenode or manifestnode belongs to.
1443 the changegroup a particular filenode or manifestnode belongs to.
1437
1444
1438 The caller can specify some nodes that must be included in the
1445 The caller can specify some nodes that must be included in the
1439 changegroup using the extranodes argument. It should be a dict
1446 changegroup using the extranodes argument. It should be a dict
1440 where the keys are the filenames (or 1 for the manifest), and the
1447 where the keys are the filenames (or 1 for the manifest), and the
1441 values are lists of (node, linknode) tuples, where node is a wanted
1448 values are lists of (node, linknode) tuples, where node is a wanted
1442 node and linknode is the changelog node that should be transmitted as
1449 node and linknode is the changelog node that should be transmitted as
1443 the linkrev.
1450 the linkrev.
1444 """
1451 """
1445
1452
1446 # Set up some initial variables
1453 # Set up some initial variables
1447 # Make it easy to refer to self.changelog
1454 # Make it easy to refer to self.changelog
1448 cl = self.changelog
1455 cl = self.changelog
1449 # Compute the list of changesets in this changegroup.
1456 # Compute the list of changesets in this changegroup.
1450 # Some bases may turn out to be superfluous, and some heads may be
1457 # Some bases may turn out to be superfluous, and some heads may be
1451 # too. nodesbetween will return the minimal set of bases and heads
1458 # too. nodesbetween will return the minimal set of bases and heads
1452 # necessary to re-create the changegroup.
1459 # necessary to re-create the changegroup.
1453 if not bases:
1460 if not bases:
1454 bases = [nullid]
1461 bases = [nullid]
1455 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1462 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1456
1463
1457 if extranodes is None:
1464 if extranodes is None:
1458 # can we go through the fast path ?
1465 # can we go through the fast path ?
1459 heads.sort()
1466 heads.sort()
1460 allheads = self.heads()
1467 allheads = self.heads()
1461 allheads.sort()
1468 allheads.sort()
1462 if heads == allheads:
1469 if heads == allheads:
1463 return self._changegroup(msng_cl_lst, source)
1470 return self._changegroup(msng_cl_lst, source)
1464
1471
1465 # slow path
1472 # slow path
1466 self.hook('preoutgoing', throw=True, source=source)
1473 self.hook('preoutgoing', throw=True, source=source)
1467
1474
1468 self.changegroupinfo(msng_cl_lst, source)
1475 self.changegroupinfo(msng_cl_lst, source)
1469
1476
1470 # We assume that all ancestors of bases are known
1477 # We assume that all ancestors of bases are known
1471 commonrevs = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1478 commonrevs = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1472
1479
1473 # Make it easy to refer to self.manifest
1480 # Make it easy to refer to self.manifest
1474 mnfst = self.manifest
1481 mnfst = self.manifest
1475 # We don't know which manifests are missing yet
1482 # We don't know which manifests are missing yet
1476 msng_mnfst_set = {}
1483 msng_mnfst_set = {}
1477 # Nor do we know which filenodes are missing.
1484 # Nor do we know which filenodes are missing.
1478 msng_filenode_set = {}
1485 msng_filenode_set = {}
1479
1486
1480 # A changeset always belongs to itself, so the changenode lookup
1487 # A changeset always belongs to itself, so the changenode lookup
1481 # function for a changenode is identity.
1488 # function for a changenode is identity.
1482 def identity(x):
1489 def identity(x):
1483 return x
1490 return x
1484
1491
1485 # A function generating function that sets up the initial environment
1492 # A function generating function that sets up the initial environment
1486 # the inner function.
1493 # the inner function.
1487 def filenode_collector(changedfiles):
1494 def filenode_collector(changedfiles):
1488 # This gathers information from each manifestnode included in the
1495 # This gathers information from each manifestnode included in the
1489 # changegroup about which filenodes the manifest node references
1496 # changegroup about which filenodes the manifest node references
1490 # so we can include those in the changegroup too.
1497 # so we can include those in the changegroup too.
1491 #
1498 #
1492 # It also remembers which changenode each filenode belongs to. It
1499 # It also remembers which changenode each filenode belongs to. It
1493 # does this by assuming the a filenode belongs to the changenode
1500 # does this by assuming the a filenode belongs to the changenode
1494 # the first manifest that references it belongs to.
1501 # the first manifest that references it belongs to.
1495 def collect_msng_filenodes(mnfstnode):
1502 def collect_msng_filenodes(mnfstnode):
1496 r = mnfst.rev(mnfstnode)
1503 r = mnfst.rev(mnfstnode)
1497 if mnfst.deltaparent(r) in mnfst.parentrevs(r):
1504 if mnfst.deltaparent(r) in mnfst.parentrevs(r):
1498 # If the previous rev is one of the parents,
1505 # If the previous rev is one of the parents,
1499 # we only need to see a diff.
1506 # we only need to see a diff.
1500 deltamf = mnfst.readdelta(mnfstnode)
1507 deltamf = mnfst.readdelta(mnfstnode)
1501 # For each line in the delta
1508 # For each line in the delta
1502 for f, fnode in deltamf.iteritems():
1509 for f, fnode in deltamf.iteritems():
1503 # And if the file is in the list of files we care
1510 # And if the file is in the list of files we care
1504 # about.
1511 # about.
1505 if f in changedfiles:
1512 if f in changedfiles:
1506 # Get the changenode this manifest belongs to
1513 # Get the changenode this manifest belongs to
1507 clnode = msng_mnfst_set[mnfstnode]
1514 clnode = msng_mnfst_set[mnfstnode]
1508 # Create the set of filenodes for the file if
1515 # Create the set of filenodes for the file if
1509 # there isn't one already.
1516 # there isn't one already.
1510 ndset = msng_filenode_set.setdefault(f, {})
1517 ndset = msng_filenode_set.setdefault(f, {})
1511 # And set the filenode's changelog node to the
1518 # And set the filenode's changelog node to the
1512 # manifest's if it hasn't been set already.
1519 # manifest's if it hasn't been set already.
1513 ndset.setdefault(fnode, clnode)
1520 ndset.setdefault(fnode, clnode)
1514 else:
1521 else:
1515 # Otherwise we need a full manifest.
1522 # Otherwise we need a full manifest.
1516 m = mnfst.read(mnfstnode)
1523 m = mnfst.read(mnfstnode)
1517 # For every file in we care about.
1524 # For every file in we care about.
1518 for f in changedfiles:
1525 for f in changedfiles:
1519 fnode = m.get(f, None)
1526 fnode = m.get(f, None)
1520 # If it's in the manifest
1527 # If it's in the manifest
1521 if fnode is not None:
1528 if fnode is not None:
1522 # See comments above.
1529 # See comments above.
1523 clnode = msng_mnfst_set[mnfstnode]
1530 clnode = msng_mnfst_set[mnfstnode]
1524 ndset = msng_filenode_set.setdefault(f, {})
1531 ndset = msng_filenode_set.setdefault(f, {})
1525 ndset.setdefault(fnode, clnode)
1532 ndset.setdefault(fnode, clnode)
1526 return collect_msng_filenodes
1533 return collect_msng_filenodes
1527
1534
1528 # If we determine that a particular file or manifest node must be a
1535 # If we determine that a particular file or manifest node must be a
1529 # node that the recipient of the changegroup will already have, we can
1536 # node that the recipient of the changegroup will already have, we can
1530 # also assume the recipient will have all the parents. This function
1537 # also assume the recipient will have all the parents. This function
1531 # prunes them from the set of missing nodes.
1538 # prunes them from the set of missing nodes.
1532 def prune(revlog, missingnodes):
1539 def prune(revlog, missingnodes):
1533 hasset = set()
1540 hasset = set()
1534 # If a 'missing' filenode thinks it belongs to a changenode we
1541 # If a 'missing' filenode thinks it belongs to a changenode we
1535 # assume the recipient must have, then the recipient must have
1542 # assume the recipient must have, then the recipient must have
1536 # that filenode.
1543 # that filenode.
1537 for n in missingnodes:
1544 for n in missingnodes:
1538 clrev = revlog.linkrev(revlog.rev(n))
1545 clrev = revlog.linkrev(revlog.rev(n))
1539 if clrev in commonrevs:
1546 if clrev in commonrevs:
1540 hasset.add(n)
1547 hasset.add(n)
1541 for n in hasset:
1548 for n in hasset:
1542 missingnodes.pop(n, None)
1549 missingnodes.pop(n, None)
1543 for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
1550 for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
1544 missingnodes.pop(revlog.node(r), None)
1551 missingnodes.pop(revlog.node(r), None)
1545
1552
1546 # Add the nodes that were explicitly requested.
1553 # Add the nodes that were explicitly requested.
1547 def add_extra_nodes(name, nodes):
1554 def add_extra_nodes(name, nodes):
1548 if not extranodes or name not in extranodes:
1555 if not extranodes or name not in extranodes:
1549 return
1556 return
1550
1557
1551 for node, linknode in extranodes[name]:
1558 for node, linknode in extranodes[name]:
1552 if node not in nodes:
1559 if node not in nodes:
1553 nodes[node] = linknode
1560 nodes[node] = linknode
1554
1561
1555 # Now that we have all theses utility functions to help out and
1562 # Now that we have all theses utility functions to help out and
1556 # logically divide up the task, generate the group.
1563 # logically divide up the task, generate the group.
1557 def gengroup():
1564 def gengroup():
1558 # The set of changed files starts empty.
1565 # The set of changed files starts empty.
1559 changedfiles = set()
1566 changedfiles = set()
1560 collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)
1567 collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)
1561
1568
1562 # Create a changenode group generator that will call our functions
1569 # Create a changenode group generator that will call our functions
1563 # back to lookup the owning changenode and collect information.
1570 # back to lookup the owning changenode and collect information.
1564 group = cl.group(msng_cl_lst, identity, collect)
1571 group = cl.group(msng_cl_lst, identity, collect)
1565 for cnt, chnk in enumerate(group):
1572 for cnt, chnk in enumerate(group):
1566 yield chnk
1573 yield chnk
1567 # revlog.group yields three entries per node, so
1574 # revlog.group yields three entries per node, so
1568 # dividing by 3 gives an approximation of how many
1575 # dividing by 3 gives an approximation of how many
1569 # nodes have been processed.
1576 # nodes have been processed.
1570 self.ui.progress(_('bundling'), cnt / 3,
1577 self.ui.progress(_('bundling'), cnt / 3,
1571 unit=_('changesets'))
1578 unit=_('changesets'))
1572 changecount = cnt / 3
1579 changecount = cnt / 3
1573 self.ui.progress(_('bundling'), None)
1580 self.ui.progress(_('bundling'), None)
1574
1581
1575 prune(mnfst, msng_mnfst_set)
1582 prune(mnfst, msng_mnfst_set)
1576 add_extra_nodes(1, msng_mnfst_set)
1583 add_extra_nodes(1, msng_mnfst_set)
1577 msng_mnfst_lst = msng_mnfst_set.keys()
1584 msng_mnfst_lst = msng_mnfst_set.keys()
1578 # Sort the manifestnodes by revision number.
1585 # Sort the manifestnodes by revision number.
1579 msng_mnfst_lst.sort(key=mnfst.rev)
1586 msng_mnfst_lst.sort(key=mnfst.rev)
1580 # Create a generator for the manifestnodes that calls our lookup
1587 # Create a generator for the manifestnodes that calls our lookup
1581 # and data collection functions back.
1588 # and data collection functions back.
1582 group = mnfst.group(msng_mnfst_lst,
1589 group = mnfst.group(msng_mnfst_lst,
1583 lambda mnode: msng_mnfst_set[mnode],
1590 lambda mnode: msng_mnfst_set[mnode],
1584 filenode_collector(changedfiles))
1591 filenode_collector(changedfiles))
1585 efiles = {}
1592 efiles = {}
1586 for cnt, chnk in enumerate(group):
1593 for cnt, chnk in enumerate(group):
1587 if cnt % 3 == 1:
1594 if cnt % 3 == 1:
1588 mnode = chnk[:20]
1595 mnode = chnk[:20]
1589 efiles.update(mnfst.readdelta(mnode))
1596 efiles.update(mnfst.readdelta(mnode))
1590 yield chnk
1597 yield chnk
1591 # see above comment for why we divide by 3
1598 # see above comment for why we divide by 3
1592 self.ui.progress(_('bundling'), cnt / 3,
1599 self.ui.progress(_('bundling'), cnt / 3,
1593 unit=_('manifests'), total=changecount)
1600 unit=_('manifests'), total=changecount)
1594 self.ui.progress(_('bundling'), None)
1601 self.ui.progress(_('bundling'), None)
1595 efiles = len(efiles)
1602 efiles = len(efiles)
1596
1603
1597 # These are no longer needed, dereference and toss the memory for
1604 # These are no longer needed, dereference and toss the memory for
1598 # them.
1605 # them.
1599 msng_mnfst_lst = None
1606 msng_mnfst_lst = None
1600 msng_mnfst_set.clear()
1607 msng_mnfst_set.clear()
1601
1608
1602 if extranodes:
1609 if extranodes:
1603 for fname in extranodes:
1610 for fname in extranodes:
1604 if isinstance(fname, int):
1611 if isinstance(fname, int):
1605 continue
1612 continue
1606 msng_filenode_set.setdefault(fname, {})
1613 msng_filenode_set.setdefault(fname, {})
1607 changedfiles.add(fname)
1614 changedfiles.add(fname)
1608 # Go through all our files in order sorted by name.
1615 # Go through all our files in order sorted by name.
1609 for idx, fname in enumerate(sorted(changedfiles)):
1616 for idx, fname in enumerate(sorted(changedfiles)):
1610 filerevlog = self.file(fname)
1617 filerevlog = self.file(fname)
1611 if not len(filerevlog):
1618 if not len(filerevlog):
1612 raise util.Abort(_("empty or missing revlog for %s") % fname)
1619 raise util.Abort(_("empty or missing revlog for %s") % fname)
1613 # Toss out the filenodes that the recipient isn't really
1620 # Toss out the filenodes that the recipient isn't really
1614 # missing.
1621 # missing.
1615 missingfnodes = msng_filenode_set.pop(fname, {})
1622 missingfnodes = msng_filenode_set.pop(fname, {})
1616 prune(filerevlog, missingfnodes)
1623 prune(filerevlog, missingfnodes)
1617 add_extra_nodes(fname, missingfnodes)
1624 add_extra_nodes(fname, missingfnodes)
1618 # If any filenodes are left, generate the group for them,
1625 # If any filenodes are left, generate the group for them,
1619 # otherwise don't bother.
1626 # otherwise don't bother.
1620 if missingfnodes:
1627 if missingfnodes:
1621 yield changegroup.chunkheader(len(fname))
1628 yield changegroup.chunkheader(len(fname))
1622 yield fname
1629 yield fname
1623 # Sort the filenodes by their revision # (topological order)
1630 # Sort the filenodes by their revision # (topological order)
1624 nodeiter = list(missingfnodes)
1631 nodeiter = list(missingfnodes)
1625 nodeiter.sort(key=filerevlog.rev)
1632 nodeiter.sort(key=filerevlog.rev)
1626 # Create a group generator and only pass in a changenode
1633 # Create a group generator and only pass in a changenode
1627 # lookup function as we need to collect no information
1634 # lookup function as we need to collect no information
1628 # from filenodes.
1635 # from filenodes.
1629 group = filerevlog.group(nodeiter,
1636 group = filerevlog.group(nodeiter,
1630 lambda fnode: missingfnodes[fnode])
1637 lambda fnode: missingfnodes[fnode])
1631 for chnk in group:
1638 for chnk in group:
1632 # even though we print the same progress on
1639 # even though we print the same progress on
1633 # most loop iterations, put the progress call
1640 # most loop iterations, put the progress call
1634 # here so that time estimates (if any) can be updated
1641 # here so that time estimates (if any) can be updated
1635 self.ui.progress(
1642 self.ui.progress(
1636 _('bundling'), idx, item=fname,
1643 _('bundling'), idx, item=fname,
1637 unit=_('files'), total=efiles)
1644 unit=_('files'), total=efiles)
1638 yield chnk
1645 yield chnk
1639 # Signal that no more groups are left.
1646 # Signal that no more groups are left.
1640 yield changegroup.closechunk()
1647 yield changegroup.closechunk()
1641 self.ui.progress(_('bundling'), None)
1648 self.ui.progress(_('bundling'), None)
1642
1649
1643 if msng_cl_lst:
1650 if msng_cl_lst:
1644 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1651 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1645
1652
1646 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1653 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1647
1654
def changegroup(self, basenodes, source):
    """Return a changegroup of every node between basenodes and all heads.

    Delegates to changegroupsubset() instead of computing the node set
    here, so that the head list is captured under the same code path
    and a race is avoided (issue1320).
    """
    currentheads = self.heads()
    return self.changegroupsubset(basenodes, currentheads, source)
1651
1658
def _changegroup(self, nodes, source):
    """Compute the changegroup of all nodes that we have that a recipient
    doesn't. Return a chunkbuffer object whose read() method will return
    successive changegroup chunks.

    This is much easier than the previous function as we can assume that
    the recipient has any changenode we aren't sending them.

    nodes is the set of nodes to send"""

    # Give hooks a chance to veto the outgoing operation before any
    # work is done.
    self.hook('preoutgoing', throw=True, source=source)

    cl = self.changelog
    # Revision numbers of every changeset we are sending; used below to
    # decide which manifest/file revisions belong in the bundle.
    revset = set([cl.rev(n) for n in nodes])
    self.changegroupinfo(nodes, source)

    def identity(x):
        # Changelog nodes are their own link nodes.
        return x

    def gennodelst(log):
        # Yield, in revision order, the nodes of `log` whose linkrev
        # points at one of the changesets being sent.
        for r in log:
            if log.linkrev(r) in revset:
                yield log.node(r)

    def lookuplinkrev_func(revlog):
        # Build a lookup mapping a node of `revlog` back to the
        # changelog node it was introduced by.
        def lookuplinkrev(n):
            return cl.node(revlog.linkrev(revlog.rev(n)))
        return lookuplinkrev

    def gengroup():
        '''yield a sequence of changegroup chunks (strings)'''
        # construct a list of all changed files
        changedfiles = set()
        mmfs = {}
        # collector fills `changedfiles` (and mmfs) as changelog
        # entries stream through cl.group() below.
        collect = changegroup.collector(cl, mmfs, changedfiles)

        # --- changesets ---
        # NOTE(review): assumes `nodes` is non-empty; `cnt` would be
        # unbound after the loop otherwise — confirm callers guarantee it.
        for cnt, chnk in enumerate(cl.group(nodes, identity, collect)):
            # revlog.group yields three entries per node, so
            # dividing by 3 gives an approximation of how many
            # nodes have been processed.
            self.ui.progress(_('bundling'), cnt / 3, unit=_('changesets'))
            yield chnk
        changecount = cnt / 3
        self.ui.progress(_('bundling'), None)

        # --- manifests ---
        mnfst = self.manifest
        nodeiter = gennodelst(mnfst)
        efiles = {}
        for cnt, chnk in enumerate(mnfst.group(nodeiter,
                                               lookuplinkrev_func(mnfst))):
            if cnt % 3 == 1:
                # Entry 1 of each node triplet starts with the manifest
                # node; accumulate the files it touches for the
                # file-progress total below.
                mnode = chnk[:20]
                efiles.update(mnfst.readdelta(mnode))
            # see above comment for why we divide by 3
            self.ui.progress(_('bundling'), cnt / 3,
                             unit=_('manifests'), total=changecount)
            yield chnk
        efiles = len(efiles)
        self.ui.progress(_('bundling'), None)

        # --- file revisions, grouped per filename, sorted by name ---
        for idx, fname in enumerate(sorted(changedfiles)):
            filerevlog = self.file(fname)
            if not len(filerevlog):
                raise util.Abort(_("empty or missing revlog for %s") % fname)
            nodeiter = gennodelst(filerevlog)
            nodeiter = list(nodeiter)
            if nodeiter:
                # Each per-file group is introduced by a chunk carrying
                # the filename.
                yield changegroup.chunkheader(len(fname))
                yield fname
                lookup = lookuplinkrev_func(filerevlog)
                for chnk in filerevlog.group(nodeiter, lookup):
                    self.ui.progress(
                        _('bundling'), idx, item=fname,
                        total=efiles, unit=_('files'))
                    yield chnk
        self.ui.progress(_('bundling'), None)

        # Signal that no more groups are left.
        yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

    # Wrap the generator so callers get a read()-able, uncompressed ('UN')
    # changegroup stream.
    return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1735
1742
def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
    """Add the changegroup returned by source.read() to this repo.
    srctype is a string like 'push', 'pull', or 'unbundle'. url is
    the URL of the repo where this changegroup is coming from.
    If lock is not None, the function takes ownership of the lock
    and releases it after the changegroup is added.

    Return an integer summarizing the change to this repo:
    - nothing changed or no source: 0
    - more heads than before: 1+added heads (2..n)
    - fewer heads than before: -1-removed heads (-2..-n)
    - number of heads stays the same: 1
    """
    # csmap/revmap close over `cl`, which is assigned further down;
    # Python resolves closure names at call time, so that is safe.
    def csmap(x):
        self.ui.debug("add changeset %s\n" % short(x))
        return len(cl)

    def revmap(x):
        return cl.rev(x)

    if not source:
        return 0

    self.hook('prechangegroup', throw=True, source=srctype, url=url)

    changesets = files = revisions = 0
    efiles = set()

    # write changelog data to temp files so concurrent readers will not see
    # inconsistent view
    cl = self.changelog
    cl.delayupdate()
    oldheads = len(cl.heads())

    tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
    try:
        trp = weakref.proxy(tr)
        # pull off the changeset group
        self.ui.status(_("adding changesets\n"))
        clstart = len(cl)
        # Small callable used as the bundle source's progress callback;
        # its step/count/total fields are mutated between phases below.
        class prog(object):
            step = _('changesets')
            count = 1
            ui = self.ui
            total = None
            def __call__(self):
                self.ui.progress(self.step, self.count, unit=_('chunks'),
                                 total=self.total)
                self.count += 1
        pr = prog()
        source.callback = pr

        if (cl.addgroup(source, csmap, trp) is None
            and not emptyok):
            raise util.Abort(_("received changelog group is empty"))
        clend = len(cl)
        changesets = clend - clstart
        # Count the distinct files touched by the incoming changesets,
        # for the "files" progress total.
        for c in xrange(clstart, clend):
            efiles.update(self[c].files())
        efiles = len(efiles)
        self.ui.progress(_('changesets'), None)

        # pull off the manifest group
        self.ui.status(_("adding manifests\n"))
        pr.step = _('manifests')
        pr.count = 1
        pr.total = changesets # manifests <= changesets
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        self.manifest.addgroup(source, revmap, trp)
        self.ui.progress(_('manifests'), None)

        needfiles = {}
        if self.ui.configbool('server', 'validate', default=False):
            # validate incoming csets have their manifests
            for cset in xrange(clstart, clend):
                mfest = self.changelog.read(self.changelog.node(cset))[0]
                mfest = self.manifest.readdelta(mfest)
                # store file nodes we must see
                for f, n in mfest.iteritems():
                    needfiles.setdefault(f, set()).add(n)

        # process the files
        self.ui.status(_("adding file changes\n"))
        pr.step = 'files'
        pr.count = 1
        pr.total = efiles
        source.callback = None

        # The stream is a sequence of (filename, revlog group) pairs,
        # terminated by an empty chunk.
        while 1:
            f = source.chunk()
            if not f:
                break
            self.ui.debug("adding %s revisions\n" % f)
            pr()
            fl = self.file(f)
            o = len(fl)
            if fl.addgroup(source, revmap, trp) is None:
                raise util.Abort(_("received file revlog group is empty"))
            revisions += len(fl) - o
            files += 1
            if f in needfiles:
                # Tick off the filenodes we were told to expect.
                needs = needfiles[f]
                for new in xrange(o, len(fl)):
                    n = fl.node(new)
                    if n in needs:
                        needs.remove(n)
                if not needs:
                    del needfiles[f]
        self.ui.progress(_('files'), None)

        # Anything still listed in needfiles was promised by a manifest
        # but never delivered: the incoming data is incomplete.
        for f, needs in needfiles.iteritems():
            fl = self.file(f)
            for n in needs:
                try:
                    fl.rev(n)
                except error.LookupError:
                    raise util.Abort(
                        _('missing file data for %s:%s - run hg verify') %
                        (f, hex(n)))

        newheads = len(cl.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            # `pending` lets pretxnchangegroup hooks see the not-yet-
            # committed changelog data.
            p = lambda: cl.writepending() and self.root or ""
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(cl.node(clstart)), source=srctype,
                      url=url, pending=p)

        # make changelog see real files again
        cl.finalize(trp)

        tr.close()
    finally:
        tr.release()
        if lock:
            lock.release()

    if changesets > 0:
        # forcefully update the on-disk branch cache
        self.ui.debug("updating the branch cache\n")
        self.updatebranchcache()
        self.hook("changegroup", node=hex(cl.node(clstart)),
                  source=srctype, url=url)

        # One 'incoming' hook invocation per added changeset.
        for i in xrange(clstart, clend):
            self.hook("incoming", node=hex(cl.node(i)),
                      source=srctype, url=url)

    # FIXME - why does this care about tip?
    if newheads == oldheads:
        bookmarks.update(self, self.dirstate.parents(), self['tip'].node())

    # never return 0 here:
    if newheads < oldheads:
        return newheads - oldheads - 1
    else:
        return newheads - oldheads + 1
1903
1910
1904
1911
def stream_in(self, remote, requirements):
    """Clone by streaming raw store files from `remote`.

    Reads the stream_out wire protocol: a status line, a
    "<file count> <byte count>" header, then for each file a
    "<name>\\0<size>" line followed by exactly `size` bytes of data.
    `requirements` is the set of format requirements to adopt; it is
    updated in place with this repo's non-format requirements.
    Returns len(self.heads()) + 1 (a pull-style "something changed"
    result).
    """
    fp = remote.stream_out()
    l = fp.readline()
    try:
        resp = int(l)
    except ValueError:
        raise error.ResponseError(
            _('Unexpected response from remote server:'), l)
    # Non-zero status codes are server-side refusals.
    if resp == 1:
        raise util.Abort(_('operation forbidden by server'))
    elif resp == 2:
        raise util.Abort(_('locking the remote repository failed'))
    elif resp != 0:
        raise util.Abort(_('the server sent an unknown error code'))
    self.ui.status(_('streaming all changes\n'))
    l = fp.readline()
    try:
        total_files, total_bytes = map(int, l.split(' ', 1))
    except (ValueError, TypeError):
        raise error.ResponseError(
            _('Unexpected response from remote server:'), l)
    self.ui.status(_('%d files to transfer, %s of data\n') %
                   (total_files, util.bytecount(total_bytes)))
    start = time.time()
    for i in xrange(total_files):
        # XXX doesn't support '\n' or '\r' in filenames
        l = fp.readline()
        try:
            name, size = l.split('\0', 1)
            size = int(size)
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
        # for backwards compat, name was partially encoded
        ofp = self.sopener(store.decodedir(name), 'w')
        # Copy exactly `size` bytes from the shared stream into the
        # store file.
        for chunk in util.filechunkiter(fp, limit=size):
            ofp.write(chunk)
        ofp.close()
    elapsed = time.time() - start
    if elapsed <= 0:
        # Avoid division by zero in the rate computation below.
        elapsed = 0.001
    self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                   (util.bytecount(total_bytes), elapsed,
                    util.bytecount(total_bytes / elapsed)))

    # new requirements = old non-format requirements + new format-related
    # requirements from the streamed-in repository
    requirements.update(set(self.requirements) - self.supportedformats)
    self._applyrequirements(requirements)
    self._writerequirements()

    # Drop all cached state; the store was rewritten underneath us.
    self.invalidate()
    return len(self.heads()) + 1
1959
1966
def clone(self, remote, heads=None, stream=False):
    '''clone remote repository.

    keyword arguments:
    heads: list of revs to clone (forces use of pull)
    stream: use streaming clone if possible'''
    # The default used to be a mutable `[]`, the classic shared-default
    # pitfall. Default to None and normalize to a fresh empty list so
    # pull() still receives [] when no heads were requested — behaviour
    # and call compatibility are unchanged.
    if heads is None:
        heads = []

    # now, all clients that can request uncompressed clones can
    # read repo formats supported by all servers that can serve
    # them.

    # if revlog format changes, client will have to check version
    # and format flags on "stream" capability, and use
    # uncompressed only if compatible.

    if stream and not heads:
        # 'stream' means remote revlog format is revlogv1 only
        if remote.capable('stream'):
            return self.stream_in(remote, set(('revlogv1',)))
        # otherwise, 'streamreqs' contains the remote revlog format
        streamreqs = remote.capable('streamreqs')
        if streamreqs:
            streamreqs = set(streamreqs.split(','))
            # if we support it, stream in and adjust our requirements
            if not streamreqs - self.supportedformats:
                return self.stream_in(remote, streamreqs)
    # Fall back to a normal pull (always taken when specific heads
    # were requested).
    return self.pull(remote, heads)
1987
1994
    def pushkey(self, namespace, key, old, new):
        """Update *key* in *namespace* from value *old* to *new*.

        Delegates to the module-level ``pushkey`` module (imported
        elsewhere in this file; inside this method the global module,
        not this method, is what the name resolves to) and returns
        the handler's result.
        """
        return pushkey.push(self, namespace, key, old, new)
1990
1997
    def listkeys(self, namespace):
        """Return the keys of *namespace* for this repository, as
        produced by the module-level ``pushkey`` module's ``list``."""
        return pushkey.list(self, namespace)
1993
2000
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that renames each (src, dest) pair in *files*.

    The pairs are copied into fresh tuples up front, so the returned
    callable holds no reference to the caller's (possibly mutable)
    sequence.
    """
    pending = [tuple(pair) for pair in files]
    def renameall():
        for source, destination in pending:
            util.rename(source, destination)
    return renameall
2001
2008
def instance(ui, path, create):
    """Open (or create) the local repository at *path*.

    A leading 'file' scheme on *path* is stripped before use.
    """
    localpath = util.drop_scheme('file', path)
    return localrepository(ui, localpath, create)
2004
2011
def islocal(path):
    """Repositories handled by this module are always local."""
    return True
General Comments 0
You need to be logged in to leave comments. Login now