localrepo: add "vfs" fields to "localrepository" for migration from "opener"...
FUJIWARA Katsunori
r17156:70343650 default
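This changeset introduces vfs, wvfs, and svfs as aliases for the existing opener, wopener, and sopener attributes of localrepository, so callers can migrate to the new vfs names incrementally while both names keep referring to the same object. The hunk header below shows the file growing from 2454 to 2457 lines: three additions, no removals; in the listing, the three added lines are marked with a leading "+" and everything else is unchanged context. A minimal sketch of the aliasing pattern (the repolike class is illustrative only; in the real class each opener is a scmutil.opener rooted at .hg/, the working directory, and the store respectively):

    class repolike(object):
        def __init__(self, opener, wopener, sopener):
            # historical names
            self.opener = opener        # rooted at .hg/
            self.wopener = wopener      # rooted at the working directory
            self.sopener = sopener      # rooted at .hg/store/
            # new vfs aliases: each pair is the same object, so code
            # using either name sees identical behavior during migration
            self.vfs = self.opener
            self.wvfs = self.wopener
            self.svfs = self.sopener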
@@ -1,2454 +1,2457 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import bin, hex, nullid, nullrev, short
from i18n import _
import repo, changegroup, subrepo, discovery, pushkey, obsolete
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock, transaction, store, encoding, base85
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
propertycache = util.propertycache
filecache = scmutil.filecache

class storecache(filecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

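storecache specializes filecache so that a cached property is tied to a file under .hg/store rather than directly under .hg/: its join() routes the tracked filename through repo.sjoin(). A rough sketch of the routing, with a hypothetical stand-in for scmutil.filecache (the real decorator also stats the joined path to decide when a cached property must be recomputed):

    # hypothetical stand-in, not the real scmutil.filecache
    class filecache(object):
        def __init__(self, path):
            self.path = path
        def join(self, obj, fname):
            return obj.join(fname)       # resolves to .hg/<fname>

    class storecache(filecache):
        def join(self, obj, fname):
            return obj.sjoin(fname)      # resolves to .hg/store/<fname>

So @storecache('00changelog.i') below watches .hg/store/00changelog.i, while @filecache('dirstate') watches .hg/dirstate.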
class localrepository(repo.repository):
    capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
                        'known', 'getbundle'))
    supportedformats = set(('revlogv1', 'generaldelta'))
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))
    openerreqs = set(('revlogv1', 'generaldelta'))
    requirements = ['revlogv1']

    def _baserequirements(self, create):
        return self.requirements[:]

    def __init__(self, baseui, path=None, create=False):
        repo.repository.__init__(self)
        self.root = os.path.realpath(util.expandpath(path))
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.auditor = scmutil.pathauditor(self.root, self._checknested)
        self.opener = scmutil.opener(self.path)
+       self.vfs = self.opener
        self.wopener = scmutil.opener(self.root)
+       self.wvfs = self.wopener
        self.baseui = baseui
        self.ui = baseui.copy()
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    util.makedirs(path)
                util.makedir(self.path, notindexed=True)
                requirements = self._baserequirements(create)
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.opener.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.opener, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
+       self.svfs = self.sopener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        self._branchcache = None
        self._branchcachetip = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

    def _applyrequirements(self, requirements):
        self.requirements = requirements
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in self.openerreqs)

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in self.requirements:
            reqfile.write("%s\n" % r)
        reqfile.close()

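_writerequirements serializes self.requirements, one entry per line, into .hg/requires; scmutil.readrequires (used in __init__ above) reads it back and rejects entries outside self.supported. A sketch of the on-disk round trip, using plain file I/O in place of the opener:

    # sketch: .hg/requires holds one requirement name per line
    reqs = set(['revlogv1', 'store', 'fncache', 'dotencode'])
    reqfile = open('.hg/requires', 'w')
    for r in reqs:
        reqfile.write("%s\n" % r)
    reqfile.close()

    # reading it back yields the same set
    ondisk = set(open('.hg/requires').read().splitlines())
    assert ondisk == reqs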
    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    @filecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.read(self)

    @filecache('bookmarks.current')
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    def _writebookmarks(self, marks):
        bookmarks.write(self)

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

    @storecache('phaseroots')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        store = obsolete.obsstore(self.sopener)
        return store

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.sopener)

    @filecache('dirstate')
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        for i in xrange(len(self)):
            yield i

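The dunder methods above give the repository mapping and sequence semantics: repo[None] is a workingctx for the working directory, repo[changeid] a changectx, len(repo) the number of changesets, and iteration yields revision numbers. A usage sketch, assuming repo is an open localrepository:

    ctx = repo['tip']         # changectx for the tip changeset
    wctx = repo[None]         # workingctx for the working directory
    n = len(repo)             # number of changesets in the changelog
    for rev in repo:          # yields 0, 1, ..., len(repo) - 1
        node = repo[rev].node()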
    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return [r for r in m(self, range(len(self)))]

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]

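revs() and set() are thin wrappers around the revset engine: formatspec() safely interpolates the extra arguments into the expression, match() compiles it, and the matcher is applied over all revisions; set() then wraps each result in a context. A sketch, again assuming an open repo:

    # revision numbers of non-merge ancestors of tip
    revs = repo.revs('ancestors(%s) and not merge()', 'tip')

    # the same query, yielding changectx objects one at a time
    for ctx in repo.set('ancestors(%s) and not merge()', 'tip'):
        print ctx.rev(), ctx.branch()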
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

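tag() refuses to run with a dirty .hgtags and then delegates to _tag(), which either appends to .hg/localtags (local tags, stored in the current charset, no commit) or appends to .hgtags in UTF-8 and commits the change. A usage sketch; the node would normally come from a user-supplied revision:

    node = repo.lookup('tip')
    # local tag: recorded only in .hg/localtags, no commit is made
    repo.tag('nightly', node, '', True, None, None)
    # global tag: appends to .hgtags and creates a commit
    repo.tag('v1.0', node, 'Added tag v1.0', False,
             'editor <editor@example.com>', None)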
    @propertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        for k, v in self._tagscache.tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

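Every tag query funnels through the single _tagscache property above, so invalidation only has to drop one attribute. tags() filters out tags whose nodes are missing locally; tagtype(), tagslist(), and nodetags(), defined just below, are derived views that are built lazily. A sketch of the reads:

    t = repo.tags()                   # {'tip': node, 'v1.0': node, ...}
    kind = repo.tagtype('v1.0')       # 'global', 'local', or None
    bytag = repo.tagslist()           # [(name, node), ...] ordered by revision
    names = repo.nodetags(t['tip'])   # sorted tag names attached to that node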
    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                r = self.changelog.rev(n)
                l.append((r, t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def updatebranchcache(self):
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just the branch tips)
        self._branchcache = partial

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        self.updatebranchcache()
        return self._branchcache

    def _branchtip(self, heads):
        '''return the tipmost branch head in heads'''
        tip = heads[-1]
        for h in reversed(heads):
            if not self[h].closesbranch():
                tip = h
                break
        return tip

    def branchtip(self, branch):
        '''return the tip node for a given branch'''
        if branch not in self.branchmap():
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
        return self._branchtip(self.branchmap()[branch])

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            bt[bn] = self._branchtip(heads)
        return bt

    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                if not node in self:
                    raise ValueError('invalidating branch cache because node '
                                     '%s does not exist' % node)
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

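The branch head cache that _readbranchcache above and _writebranchcache just below manage lives in .hg/cache/branchheads as plain text: the first line is '<hexnode> <rev>' for the cached tip, and each further line is '<hexnode> <branchname>' for one branch head. If the recorded tip no longer matches the repository, the whole cache is discarded. Roughly, for a repository with two branches (hex nodes abbreviated here for illustration):

    fa4832...c91e 42          first line: cache is valid up to tip, rev 42
    9f3c21...77ab default     one head of branch 'default'
    77ab90...9f3c default     a second head of 'default'
    c01d55...aa01 stable      the only head of 'stable'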
    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.close()
        except (IOError, OSError):
            pass

    def _updatebranchcache(self, partial, ctxgen):
        """Given a branchhead cache, partial, that may have extra nodes or be
        missing heads, and a generator of nodes that are at least a superset of
        heads missing, this function updates partial to be correct.
        """
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            # Remove candidate heads that no longer are in the repo (e.g., as
            # the result of a strip that just happened). Avoid using 'node in
            # self' here because that dives down into branchcache code somewhat
            # recursively.
            bheadrevs = [self.changelog.rev(node) for node in bheads
                         if self.changelog.hasnode(node)]
            newheadrevs = [self.changelog.rev(node) for node in newnodes
                           if self.changelog.hasnode(node)]
            ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
            # Remove duplicates - nodes that are in newheadrevs and are already
            # in bheadrevs. This can happen if you strip a node whose parent
            # was already a head (because they're on different branches).
            bheadrevs = sorted(set(bheadrevs).union(newheadrevs))

            # Starting from tip means fewer passes over reachable. If we know
            # the new candidates are not ancestors of existing heads, we don't
            # have to examine ancestors of existing heads
            if ctxisnew:
                iterrevs = sorted(newheadrevs)
            else:
                iterrevs = list(bheadrevs)

            # This loop prunes out two kinds of heads - heads that are
            # superseded by a head in newheadrevs, and newheadrevs that are not
            # heads because an existing head is their descendant.
            while iterrevs:
                latest = iterrevs.pop()
                if latest not in bheadrevs:
                    continue
                ancestors = set(self.changelog.ancestors([latest],
                                                         bheadrevs[0]))
                if ancestors:
                    bheadrevs = [b for b in bheadrevs if b not in ancestors]
            partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]

        # There may be branches that cease to exist when the last commit in the
        # branch was stripped. This code filters them out. Note that the
        # branch that ceased to exist may not be in newbranches because
        # newbranches is the set of candidate heads, which when you strip the
        # last commit in a branch will be the parent branch.
        for branch in partial:
            nodes = [head for head in partial[branch]
                     if self.changelog.hasnode(head)]
            if not nodes:
                del partial[branch]

    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        pc = self._phasecache
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or pc.phase(self, r) >= phases.secret)
            result.append(resp)
        return result

    def local(self):
        return self

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

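join(), wjoin(), and sjoin() anchor relative names to the three roots a repository manages: .hg/, the working directory, and the store. For a repository at /repo (a sketch; sjoin comes from store.join and so respects the store layout):

    repo.join('hgrc')            # '/repo/.hg/hgrc'
    repo.wjoin('README')         # '/repo/README'
    repo.sjoin('00changelog.i')  # '/repo/.hg/store/00changelog.i' with the
                                 # 'store' requirement; directly under
                                 # '/repo/.hg/' in older store-less layouts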
    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        copies = self.dirstate.setparents(p1, p2)
        if copies:
            # Adjust copy records: the dirstate cannot do it, since it
            # requires access to the parents' manifests. Preserve them
            # only for entries added to the first parent.
            pctx = self[p1]
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

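The filter machinery above is driven by the [encode] and [decode] ui sections: each item maps a file pattern to either a registered data filter (matched by name prefix in _datafilters) or a shell command run through util.filter, and a command of '!' disables the pattern. wread() pushes working-directory data through the encode filters; wwrite() applies the decode filters on the way out. An hgrc sketch in the style of the win32text extension, which registers the clever*/dumb* filters (shown here only as an assumed example):

    [encode]
    **.txt = dumbencode:
    ** = cleverencode:

    [decode]
    **.txt = dumbdecode:
    ** = cleverdecode: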
    @propertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @propertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                util.setflags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def transaction(self, desc):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        self._writejournal(desc)
        renames = [(x, undoname(x)) for x in self._journalfiles()]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

    def _journalfiles(self):
        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
                self.join('journal.bookmarks'),
                self.sjoin('journal.phaseroots'))

    def undofiles(self):
        return [undoname(x) for x in self._journalfiles()]

    def _writejournal(self, desc):
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))

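transaction() first snapshots the state a rollback needs: _writejournal copies dirstate, branch, desc, and bookmarks under .hg/ and phaseroots under the store, all with a journal. prefix. When the transaction closes, aftertrans (defined later in this file) renames every journal file to its undo counterpart via undoname, and those undo.* files are what rollback() later restores. The resulting rename mapping, as a sketch:

    # each journal file is renamed to its undo twin on close
    renames = [
        ('.hg/store/journal',            '.hg/store/undo'),
        ('.hg/journal.dirstate',         '.hg/undo.dirstate'),
        ('.hg/journal.branch',           '.hg/undo.branch'),
        ('.hg/journal.desc',             '.hg/undo.desc'),
        ('.hg/journal.bookmarks',        '.hg/undo.bookmarks'),
        ('.hg/store/journal.phaseroots', '.hg/store/undo.phaseroots'),
    ]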
818 def recover(self):
821 def recover(self):
819 lock = self.lock()
822 lock = self.lock()
820 try:
823 try:
821 if os.path.exists(self.sjoin("journal")):
824 if os.path.exists(self.sjoin("journal")):
822 self.ui.status(_("rolling back interrupted transaction\n"))
825 self.ui.status(_("rolling back interrupted transaction\n"))
823 transaction.rollback(self.sopener, self.sjoin("journal"),
826 transaction.rollback(self.sopener, self.sjoin("journal"),
824 self.ui.warn)
827 self.ui.warn)
825 self.invalidate()
828 self.invalidate()
826 return True
829 return True
827 else:
830 else:
828 self.ui.warn(_("no interrupted transaction available\n"))
831 self.ui.warn(_("no interrupted transaction available\n"))
829 return False
832 return False
830 finally:
833 finally:
831 lock.release()
834 lock.release()
832
835
833 def rollback(self, dryrun=False, force=False):
836 def rollback(self, dryrun=False, force=False):
834 wlock = lock = None
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if os.path.exists(self.join('undo.bookmarks')):
            util.rename(self.join('undo.bookmarks'),
                        self.join('bookmarks'))
        if os.path.exists(self.sjoin('undo.phaseroots')):
            util.rename(self.sjoin('undo.phaseroots'),
                        self.sjoin('phaseroots'))
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            util.rename(self.join('undo.dirstate'), self.join('dirstate'))
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(branch)
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

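    # Illustrative note (not from the original source): _rollback above is
    # driven entirely by the transaction's undo files, roughly:
    #   .hg/undo.desc             -> old changelog length plus a description
    #                                of the transaction being undone
    #   .hg/store/undo            -> journal replayed by transaction.rollback()
    #   .hg/undo.bookmarks        -> renamed back over .hg/bookmarks
    #   .hg/store/undo.phaseroots -> renamed back over .hg/store/phaseroots
    #   .hg/undo.dirstate, .hg/undo.branch -> restored only when the working
    #                                directory parents were stripped away
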
    def invalidatecaches(self):
        def delcache(name):
            try:
                delattr(self, name)
            except AttributeError:
                pass

        delcache('_tagscache')

        self._branchcache = None # in UTF-8
        self._branchcachetip = None

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This differs from dirstate.invalidate() in that it doesn't always
        reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if 'dirstate' in self.__dict__:
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self, 'dirstate')

    def invalidate(self):
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(self, k)
            except AttributeError:
                pass
        self.invalidatecaches()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()

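    # Illustrative usage (not from the original source): commit() below defers
    # its "commit" hook through _afterlock so the hook only fires once the
    # store lock has actually been released:
    #
    #   def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
    #       self.hook("commit", node=node, parent1=parent1, parent2=parent2)
    #   self._afterlock(commithook)
    #
    # When no lock is currently held, the callback simply runs immediately.
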
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.store.write()
            if '_phasecache' in vars(self):
                self._phasecache.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

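    # Illustrative usage (not from the original source): callers needing both
    # locks take the working-directory lock before the store lock, as
    # rollback() above does, and release in reverse acquisition order:
    #
    #   wlock = lock = None
    #   try:
    #       wlock = repo.wlock()
    #       lock = repo.lock()
    #       # ... modify the repository ...
    #   finally:
    #       release(lock, wlock)
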
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

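    # Illustrative sketch (not from the original source): for a detected
    # rename, the new filelog revision carries the copy source in its
    # metadata instead of a real first parent, e.g.:
    #
    #   meta = {'copy': 'foo', 'copyrev': '<hex filenode of foo>'}
    #   flog.add(text, meta, tr, linkrev, nullid, newfparent)
    #
    # A reader that sees a null first parent together with a 'copy' key knows
    # to chase the copy source when computing merge bases.
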
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                if '.hgsubstate' in changes[0]:
                    changes[0].remove('.hgsubstate')
                if '.hgsubstate' in changes[2]:
                    changes[2].remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and changes[3]:
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        self._afterlock(commithook)
        return ret

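    # Illustrative usage (not from the original source; names are
    # hypothetical): committing a subset of the working directory, assuming
    # `repo` is a localrepository with pending changes:
    #
    #   m = matchmod.match(repo.root, '', ['src/foo.py'])
    #   node = repo.commit(text='fix foo', user='me <me@example.com>', match=m)
    #   if node is None:
    #       pass  # nothing to commit
    #
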
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in its proper phase
            targetphase = phases.newcommitphase(self.ui)
            if targetphase:
                # retracting the boundary does not alter parent changesets;
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    def destroyed(self, newheadnodes=None):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.

        If you know the branchheadcache was up to date before nodes were
        removed and you also know the set of candidate new heads that may
        have resulted from the destruction, you can set newheadnodes. This
        will enable the code to update the branchheads cache, rather than
        having future code decide it's invalid and regenerating it from
        scratch.
        '''
        # If we have info, newheadnodes, on how to update the branch cache, do
        # it. Otherwise, since nodes were destroyed, the cache is stale and
        # this will be caught the next time it is read.
        if newheadnodes:
            tiprev = len(self) - 1
            ctxgen = (self[node] for node in newheadnodes
                      if self.changelog.hasnode(node))
            self._updatebranchcache(self._branchcache, ctxgen)
            self._writebranchcache(self._branchcache, self.changelog.tip(),
                                   tiprev)

        # Ensure the persistent tag cache is updated.  Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback.  That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            if match.always():
                return mf
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            withflags = mf1.withflags() | mf2.withflags()
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

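    # Illustrative usage (not from the original source): status() returns a
    # 7-tuple of sorted file lists, in this order:
    #
    #   (modified, added, removed, deleted,
    #    unknown, ignored, clean) = repo.status(unknown=True, ignored=True,
    #                                           clean=True)
    #
    # The unknown, ignored and clean lists stay empty unless the matching
    # flag is passed.
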
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if not self[h].closesbranch()]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

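    # Illustrative note (not from the original source): between() walks first
    # parents from `top` toward `bottom`, recording nodes at exponentially
    # growing distances (i == 1, 2, 4, 8, ...). A pair whose linear range
    # spans k changesets therefore yields O(log k) sample points instead of
    # the whole chain.
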
    def pull(self, remote, heads=None, force=False):
        # don't open transaction for nothing or you break future useful
        # rollback call
        tr = None
        trname = 'pull\n' + util.hidepassword(remote.url())
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                added = []
                result = 0
            else:
                tr = self.transaction(trname)
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                clstart = len(self.changelog)
                result = self.addchangegroup(cg, 'pull', remote.url())
                clend = len(self.changelog)
                added = [self.changelog.node(r) for r in xrange(clstart, clend)]

            # compute target subset
            if heads is None:
                # We pulled everything possible
                # sync on everything common
                subset = common + added
            else:
                # We pulled a specific subset
                # sync on this subset
                subset = heads

            # Get remote phases data from remote
            remotephases = remote.listkeys('phases')
            publishing = bool(remotephases.get('publishing', False))
            if remotephases and not publishing:
                # remote is new and non-publishing
                pheads, _dr = phases.analyzeremotephases(self, subset,
                                                         remotephases)
                phases.advanceboundary(self, phases.public, pheads)
                phases.advanceboundary(self, phases.draft, subset)
            else:
                # Remote is old or publishing; all common changesets
                # should be seen as public
                phases.advanceboundary(self, phases.public, subset)

            remoteobs = remote.listkeys('obsolete')
            if 'dump' in remoteobs:
                if tr is None:
                    tr = self.transaction(trname)
                data = base85.b85decode(remoteobs['dump'])
                self.obsstore.mergemarkers(tr, data)
            if tr is not None:
                tr.close()
        finally:
            if tr is not None:
                tr.release()
            lock.release()

        return result

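    # Illustrative note (not from the original source): phase exchange rides
    # on the pushkey protocol. A publishing server advertises a truthy
    # 'publishing' entry in listkeys('phases'), so everything pulled is
    # advanced to public locally; a non-publishing server's reported phase
    # roots are analyzed and only the heads it considers public are advanced.
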
    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """
        pass

    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - None means nothing to push
          - 0 means HTTP error
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        # get local lock as we might write phase data
        locallock = self.lock()
        try:
            self.checkpush(force, revs)
            lock = None
            unbundle = remote.capable('unbundle')
            if not unbundle:
                lock = remote.lock()
            try:
                # discovery
                fci = discovery.findcommonincoming
                commoninc = fci(self, remote, force=force)
                common, inc, remoteheads = commoninc
                fco = discovery.findcommonoutgoing
                outgoing = fco(self, remote, onlyheads=revs,
                               commoninc=commoninc, force=force)

                if not outgoing.missing:
                    # nothing to push
                    scmutil.nochangesfound(self.ui, outgoing.excluded)
                    ret = None
                else:
                    # something to push
                    if not force:
                        discovery.checkheads(self, remote, outgoing,
                                             remoteheads, newbranch,
                                             bool(inc))

                    # create a changegroup from local
                    if revs is None and not outgoing.excluded:
                        # push everything,
                        # use the fast path, no race possible on push
                        cg = self._changegroup(outgoing.missing, 'push')
                    else:
                        cg = self.getlocalbundle('push', outgoing)

                    # apply changegroup to remote
                    if unbundle:
                        # local repo finds heads on server, finds out what
                        # revs it must push. once revs transferred, if server
                        # finds it has different heads (someone else won
                        # commit/push race), server aborts.
                        if force:
                            remoteheads = ['force']
                        # ssh: return remote's addchangegroup()
                        # http: return remote's addchangegroup() or 0 for error
                        ret = remote.unbundle(cg, remoteheads, 'push')
                    else:
                        # we return an integer indicating remote head count
                        # change
                        ret = remote.addchangegroup(cg, 'push', self.url())

                if ret:
                    # push succeeded, synchronize the target of the push
                    cheads = outgoing.missingheads
                elif revs is None:
                    # All-out push failed. Synchronize all common.
                    cheads = outgoing.commonheads
                else:
                    # I want cheads = heads(::missingheads and ::commonheads)
                    # (missingheads is revs with secret changesets filtered out)
                    #
                    # This can be expressed as:
                    #     cheads = ( (missingheads and ::commonheads)
                    #              + (commonheads and ::missingheads))
                    #
                    # while trying to push we already computed the following:
                    #     common = (::commonheads)
                    #     missing = ((commonheads::missingheads) - commonheads)
                    #
                    # We can pick:
                    # * missingheads part of common (::commonheads)
                    common = set(outgoing.common)
                    cheads = [node for node in revs if node in common]
                    # and
                    # * commonheads parents on missing
                    revset = self.set('%ln and parents(roots(%ln))',
                                      outgoing.commonheads,
                                      outgoing.missing)
                    cheads.extend(c.node() for c in revset)
                # even when we don't push, exchanging phase data is useful
                remotephases = remote.listkeys('phases')
                if not remotephases: # old server or public only repo
                    phases.advanceboundary(self, phases.public, cheads)
                    # don't push any phase data as there is nothing to push
                else:
                    ana = phases.analyzeremotephases(self, cheads, remotephases)
                    pheads, droots = ana
                    ### Apply remote phase on local
                    if remotephases.get('publishing', False):
                        phases.advanceboundary(self, phases.public, cheads)
                    else: # publish = False
                        phases.advanceboundary(self, phases.public, pheads)
                        phases.advanceboundary(self, phases.draft, cheads)
                    ### Apply local phase on remote

                    # Get the list of all revs that are draft on remote but
                    # public here.
                    # XXX Beware that the revset breaks if droots is not
                    # XXX strictly roots; we may want to ensure it is, but
                    # XXX that is costly
                    outdated = self.set('heads((%ln::%ln) and public())',
                                        droots, cheads)
                    for newremotehead in outdated:
                        r = remote.pushkey('phases',
1819 r = remote.pushkey('phases',
1817 newremotehead.hex(),
1820 newremotehead.hex(),
1818 str(phases.draft),
1821 str(phases.draft),
1819 str(phases.public))
1822 str(phases.public))
1820 if not r:
1823 if not r:
1821 self.ui.warn(_('updating %s to public failed!\n')
1824 self.ui.warn(_('updating %s to public failed!\n')
1822 % newremotehead)
1825 % newremotehead)
1823 if 'obsolete' in self.listkeys('namespaces') and self.obsstore:
1826 if 'obsolete' in self.listkeys('namespaces') and self.obsstore:
1824 data = self.listkeys('obsolete')['dump']
1827 data = self.listkeys('obsolete')['dump']
1825 r = remote.pushkey('obsolete', 'dump', '', data)
1828 r = remote.pushkey('obsolete', 'dump', '', data)
1826 if not r:
1829 if not r:
1827 self.ui.warn(_('failed to push obsolete markers!\n'))
1830 self.ui.warn(_('failed to push obsolete markers!\n'))
1828 finally:
1831 finally:
1829 if lock is not None:
1832 if lock is not None:
1830 lock.release()
1833 lock.release()
1831 finally:
1834 finally:
1832 locallock.release()
1835 locallock.release()
1833
1836
1834 self.ui.debug("checking for updated bookmarks\n")
1837 self.ui.debug("checking for updated bookmarks\n")
1835 rb = remote.listkeys('bookmarks')
1838 rb = remote.listkeys('bookmarks')
1836 for k in rb.keys():
1839 for k in rb.keys():
1837 if k in self._bookmarks:
1840 if k in self._bookmarks:
1838 nr, nl = rb[k], hex(self._bookmarks[k])
1841 nr, nl = rb[k], hex(self._bookmarks[k])
1839 if nr in self:
1842 if nr in self:
1840 cr = self[nr]
1843 cr = self[nr]
1841 cl = self[nl]
1844 cl = self[nl]
1842 if cl in cr.descendants():
1845 if cl in cr.descendants():
1843 r = remote.pushkey('bookmarks', k, nr, nl)
1846 r = remote.pushkey('bookmarks', k, nr, nl)
1844 if r:
1847 if r:
1845 self.ui.status(_("updating bookmark %s\n") % k)
1848 self.ui.status(_("updating bookmark %s\n") % k)
1846 else:
1849 else:
1847 self.ui.warn(_('updating bookmark %s'
1850 self.ui.warn(_('updating bookmark %s'
1848 ' failed!\n') % k)
1851 ' failed!\n') % k)
1849
1852
1850 return ret
1853 return ret
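
    # Illustrative sketch (added commentary, not part of the original
    # file): given some peer repository object "other", a caller drives
    # the method above roughly as
    #
    #     ret = repo.push(other, force=False, revs=None, newbranch=False)
    #
    # ret is None when there was nothing to push; otherwise it is the
    # remote unbundle()/addchangegroup() result (see addchangegroup below
    # for the integer encoding).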

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = set(cl.ancestors([cl.rev(n) for n in bases]))
        return self._changegroupsubset(common, csets, heads, source)
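
    # Example (added commentary): with a linear history 0..4,
    # changegroupsubset([node1], [node3], 'pull') bundles roughly
    # changesets 1, 2 and 3 (descendants of the base that are ancestors
    # of the head, endpoints included), plus the manifest and file
    # revisions they introduce.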

    def getlocalbundle(self, source, outgoing):
        """Like getbundle, but taking a discovery.outgoing as an argument.

        This is only implemented for local repos and reuses potentially
        precomputed sets in outgoing."""
        if not outgoing.missing:
            return None
        return self._changegroupsubset(outgoing.common,
                                       outgoing.missing,
                                       outgoing.missingheads,
                                       source)

    def getbundle(self, source, heads=None, common=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors of common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            nm = cl.nodemap
            common = [n for n in common if n in nm]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        return self.getlocalbundle(source,
                                   discovery.outgoing(cl, common, heads))
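
    # Sketch (added commentary): getbundle computes ancestors(heads) -
    # ancestors(common). With a linear history 0..5, heads=[node5] and
    # common=[node2], the resulting bundle carries changesets 3, 4 and 5.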

    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0, 0]

        # can we go through the fast path ?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            rr, rl = revlog.rev, revlog.linkrev
            return [n for n in missing
                    if rl(rr(n)) not in commonrevs]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f, n in mdata.iteritems():
                    if f in changedfiles:
                        fnodes[f].setdefault(n, clnode)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return clnode
            else:
                progress(_bundling, count[0], item=fstate[0],
                         unit=_files, total=count[1])
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            count[:] = [0, len(csets)]
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            for f in changedfiles:
                fnodes[f] = {}
            count[:] = [0, len(mfs)]
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            progress(_bundling, None)

            if csets:
                self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
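
    # Note (added commentary): bundle10's lookup callback is the glue
    # here; for every changelog, manifest or filelog node being bundled
    # it must return the changeset node that introduced it, which is
    # what lets the receiving side rebuild the linkrevs.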

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0, 0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            ln, llr = log.node, log.linkrev
            return [ln(r) for r in log if llr(r) in revset]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                progress(_bundling, count[0], item=fstate[0],
                         total=count[1], unit=_files)
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            count[:] = [0, len(nodes)]
            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(mfs)]
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            progress(_bundling, None)

            if nodes:
                self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def addchangegroup(self, source, srctype, url, emptyok=False):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            srccontent = cl.addgroup(source, csmap, trp)
            if not (srccontent or emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if not fl.addgroup(source, revmap, trp):
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and self[h].closesbranch():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            publishing = self.ui.configbool('phases', 'publish', True)
            if srctype == 'push':
                # Old servers can not push the boundary themselves.
                # New servers won't push the boundary if a changeset already
                # existed locally as secret.
                #
                # We should not use added here but the list of all changes in
                # the bundle
                if publishing:
                    phases.advanceboundary(self, phases.public, srccontent)
                else:
                    phases.advanceboundary(self, phases.draft, srccontent)
                    phases.retractboundary(self, phases.draft, added)
            elif srctype != 'strip':
                # publishing only alters behavior during push
                #
                # strip should not touch the boundary at all
                phases.retractboundary(self, phases.draft, added)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()

            if changesets > 0:
                def runhooks():
                    # forcefully update the on-disk branch cache
                    self.ui.debug("updating the branch cache\n")
                    self.updatebranchcache()
                    self.hook("changegroup", node=hex(cl.node(clstart)),
                              source=srctype, url=url)

                    for n in added:
                        self.hook("incoming", node=hex(n), source=srctype,
                                  url=url)
                self._afterlock(runhooks)

        finally:
            tr.release()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1
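
    # Worked example of the return encoding (added commentary): with
    # dh == +2 (two new heads) the method returns 3; with dh == -1 (one
    # head gone) it returns -2; with an unchanged head count it returns 1,
    # so 0 is left to mean "nothing changed or no source".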

    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('unexpected response from remote server:'), l)
                if self.ui.debugflag:
                    self.ui.debug('adding %s (%s)\n' %
                                  (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    handled_bytes += len(chunk)
                    self.ui.progress(_('clone'), handled_bytes,
                                     total=total_bytes)
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related requirements
            #                    from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()
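
    # Wire format consumed above, reconstructed from the parsing code (a
    # sketch, not normative):
    #
    #     <resp>\n                       "0" means success
    #     <total_files> <total_bytes>\n
    #     then, per file: <store name>\0<size>\n followed by exactly
    #     <size> bytes of raw revlog data, written out via self.sopener.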

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if not stream:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)
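
    # Decision summary (added commentary): a streaming clone is attempted
    # only for full clones (no explicit heads) when the server advertises
    # 'stream', or a 'streamreqs' set this client fully supports; every
    # other case falls back to a regular pull.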

    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root)+1:])
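
    # Example (added commentary): pushkey/listkeys is how lightweight
    # metadata travels over the wire; push() above uses, e.g.,
    # remote.pushkey('bookmarks', name, oldhex, newhex), which returns a
    # truthy value on success, and remote.listkeys('phases') to read the
    # remote phase map.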

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            try:
                util.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True
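
# Example (added commentary): undoname('.hg/store/journal') yields
# '.hg/store/undo'; aftertrans returns a callback performing such
# journal -> undo renames once a transaction is closed.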
@@ -1,140 +1,142
# statichttprepo.py - simple http repository class for mercurial
#
# This provides read-only repo access to repositories exported via static http
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from i18n import _
import changelog, byterange, url, error
import localrepo, manifest, util, scmutil, store
import urllib, urllib2, errno

class httprangereader(object):
    def __init__(self, url, opener):
        # we assume opener has HTTPRangeHandler
        self.url = url
        self.pos = 0
        self.opener = opener
        self.name = url
    def seek(self, pos):
        self.pos = pos
    def read(self, bytes=None):
        req = urllib2.Request(self.url)
        end = ''
        if bytes:
            end = self.pos + bytes - 1
        if self.pos or end:
            req.add_header('Range', 'bytes=%d-%s' % (self.pos, end))

        try:
            f = self.opener.open(req)
            data = f.read()
            # Python 2.6+ defines a getcode() function, and 2.4 and
            # 2.5 appear to always have an undocumented code attribute
            # set. If we can't read either of those, fall back to 206
            # and hope for the best.
            code = getattr(f, 'getcode', lambda : getattr(f, 'code', 206))()
        except urllib2.HTTPError, inst:
            num = inst.code == 404 and errno.ENOENT or None
            raise IOError(num, inst)
        except urllib2.URLError, inst:
            raise IOError(None, inst.reason[1])

        if code == 200:
            # HTTPRangeHandler does nothing if remote does not support
            # Range headers and returns the full entity. Let's slice it.
            if bytes:
                data = data[self.pos:self.pos + bytes]
            else:
                data = data[self.pos:]
        elif bytes:
            data = data[:bytes]
        self.pos += len(data)
        return data
    def __iter__(self):
        return iter(self.read().splitlines(1))
    def close(self):
        pass
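
# Sketch (added commentary): a 20-byte read at position 100 sends
# "Range: bytes=100-119"; a server that ignores Range replies 200 with
# the whole entity, which read() then slices back down as a fallback.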

def build_opener(ui, authinfo):
    # urllib cannot handle URLs with embedded user or passwd
    urlopener = url.opener(ui, authinfo)
    urlopener.add_handler(byterange.HTTPRangeHandler())

    class statichttpopener(scmutil.abstractopener):
        def __init__(self, base):
            self.base = base

        def __call__(self, path, mode="r", atomictemp=None):
            if mode not in ('r', 'rb'):
                raise IOError('Permission denied')
            f = "/".join((self.base, urllib.quote(path)))
            return httprangereader(f, urlopener)

    return statichttpopener

class statichttprepository(localrepo.localrepository):
    def __init__(self, ui, path):
        self._url = path
        self.ui = ui

        self.root = path
        u = util.url(path.rstrip('/') + "/.hg")
        self.path, authinfo = u.authinfo()

        opener = build_opener(ui, authinfo)
        self.opener = opener(self.path)
        self.vfs = self.opener
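        # added commentary: 'vfs' is the new name this changeset introduces
        # for the 'opener' attribute; both point at the same object during
        # the opener -> vfs migration.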
        self._phasedefaults = []

        try:
            requirements = scmutil.readrequires(self.opener, self.supported)
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            requirements = set()

        # check if it is a non-empty old-style repository
        try:
            fp = self.opener("00changelog.i")
            fp.read(1)
            fp.close()
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            # we do not care about empty old-style repositories here
            msg = _("'%s' does not appear to be an hg repository") % path
            raise error.RepoError(msg)

        # setup store
        self.store = store.store(requirements, self.path, opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.svfs = self.sopener
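        # added commentary: likewise, 'svfs' is the new alias for the
        # store opener.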
        self.sjoin = self.store.join
        self._filecache = {}

        self.manifest = manifest.manifest(self.sopener)
        self.changelog = changelog.changelog(self.sopener)
        self._tags = None
        self.nodetagscache = None
        self._branchcache = None
        self._branchcachetip = None
        self.encodepats = None
        self.decodepats = None
        self.capabilities.difference_update(["pushkey"])

    def url(self):
        return self._url

    def local(self):
        return False

    def lock(self, wait=True):
        raise util.Abort(_('cannot lock static-http repository'))

def instance(ui, path, create):
    if create:
        raise util.Abort(_('cannot create new static-http repository'))
    return statichttprepository(ui, path[7:])
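
# Usage note (added commentary): this class backs URLs of the form
# static-http://host/path; instance() strips the seven-character
# "static-" prefix, so path[7:] leaves a plain http:// URL for the
# range reader above.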