convert: differentiate between IOError and OSError on commitctx()...
Giorgos Keramidas
r10428:e553a425 stable
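In short: before this revision commitctx() caught IOError and OSError together and only re-raised them when error was set; afterwards an OSError always warns and re-raises, while an IOError is fatal only when error is set or its errno is something other than ENOENT, so a file that has simply gone missing is recorded as removed. A minimal standalone sketch of that pattern, with hypothetical names (readdata, removed) standing in for the real repository internals, not code from this changeset:

    import errno

    def commitfile(readdata, f, error=False, removed=None):
        """Illustrative sketch of the IOError/OSError split this changeset
        introduces in commitctx(); readdata and removed are hypothetical."""
        if removed is None:
            removed = []
        try:
            return readdata(f)          # stands in for self._filecommit(...)
        except OSError:
            # an OSError is always a real problem: warn and re-raise
            print "trouble committing %s!" % f
            raise
        except IOError, inst:           # Python 2 syntax, as in the diff
            errcode = getattr(inst, 'errno', errno.ENOENT)
            if error or errcode and errcode != errno.ENOENT:
                # unexpected IOError, or caller asked for strict handling
                print "trouble committing %s!" % f
                raise
            # ENOENT and not strict: treat the file as removed
            removed.append(f)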
@@ -1,2154 +1,2158 @@
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup, subrepo
10 import repo, changegroup, subrepo
11 import changelog, dirstate, filelog, manifest, context
11 import changelog, dirstate, filelog, manifest, context
12 import lock, transaction, store, encoding
12 import lock, transaction, store, encoding
13 import util, extensions, hook, error
13 import util, extensions, hook, error
14 import match as match_
14 import match as match_
15 import merge as merge_
15 import merge as merge_
16 import tags as tags_
16 import tags as tags_
17 from lock import release
17 from lock import release
18 import weakref, stat, errno, os, time, inspect
18 import weakref, stat, errno, os, time, inspect
19 propertycache = util.propertycache
19 propertycache = util.propertycache
20
20
21 class localrepository(repo.repository):
21 class localrepository(repo.repository):
22 capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
22 capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
23 supported = set('revlogv1 store fncache shared'.split())
23 supported = set('revlogv1 store fncache shared'.split())
24
24
25 def __init__(self, baseui, path=None, create=0):
25 def __init__(self, baseui, path=None, create=0):
26 repo.repository.__init__(self)
26 repo.repository.__init__(self)
27 self.root = os.path.realpath(path)
27 self.root = os.path.realpath(path)
28 self.path = os.path.join(self.root, ".hg")
28 self.path = os.path.join(self.root, ".hg")
29 self.origroot = path
29 self.origroot = path
30 self.opener = util.opener(self.path)
30 self.opener = util.opener(self.path)
31 self.wopener = util.opener(self.root)
31 self.wopener = util.opener(self.root)
32 self.baseui = baseui
32 self.baseui = baseui
33 self.ui = baseui.copy()
33 self.ui = baseui.copy()
34
34
35 try:
35 try:
36 self.ui.readconfig(self.join("hgrc"), self.root)
36 self.ui.readconfig(self.join("hgrc"), self.root)
37 extensions.loadall(self.ui)
37 extensions.loadall(self.ui)
38 except IOError:
38 except IOError:
39 pass
39 pass
40
40
41 if not os.path.isdir(self.path):
41 if not os.path.isdir(self.path):
42 if create:
42 if create:
43 if not os.path.exists(path):
43 if not os.path.exists(path):
44 os.mkdir(path)
44 os.mkdir(path)
45 os.mkdir(self.path)
45 os.mkdir(self.path)
46 requirements = ["revlogv1"]
46 requirements = ["revlogv1"]
47 if self.ui.configbool('format', 'usestore', True):
47 if self.ui.configbool('format', 'usestore', True):
48 os.mkdir(os.path.join(self.path, "store"))
48 os.mkdir(os.path.join(self.path, "store"))
49 requirements.append("store")
49 requirements.append("store")
50 if self.ui.configbool('format', 'usefncache', True):
50 if self.ui.configbool('format', 'usefncache', True):
51 requirements.append("fncache")
51 requirements.append("fncache")
52 # create an invalid changelog
52 # create an invalid changelog
53 self.opener("00changelog.i", "a").write(
53 self.opener("00changelog.i", "a").write(
54 '\0\0\0\2' # represents revlogv2
54 '\0\0\0\2' # represents revlogv2
55 ' dummy changelog to prevent using the old repo layout'
55 ' dummy changelog to prevent using the old repo layout'
56 )
56 )
57 reqfile = self.opener("requires", "w")
57 reqfile = self.opener("requires", "w")
58 for r in requirements:
58 for r in requirements:
59 reqfile.write("%s\n" % r)
59 reqfile.write("%s\n" % r)
60 reqfile.close()
60 reqfile.close()
61 else:
61 else:
62 raise error.RepoError(_("repository %s not found") % path)
62 raise error.RepoError(_("repository %s not found") % path)
63 elif create:
63 elif create:
64 raise error.RepoError(_("repository %s already exists") % path)
64 raise error.RepoError(_("repository %s already exists") % path)
65 else:
65 else:
66 # find requirements
66 # find requirements
67 requirements = set()
67 requirements = set()
68 try:
68 try:
69 requirements = set(self.opener("requires").read().splitlines())
69 requirements = set(self.opener("requires").read().splitlines())
70 except IOError, inst:
70 except IOError, inst:
71 if inst.errno != errno.ENOENT:
71 if inst.errno != errno.ENOENT:
72 raise
72 raise
73 for r in requirements - self.supported:
73 for r in requirements - self.supported:
74 raise error.RepoError(_("requirement '%s' not supported") % r)
74 raise error.RepoError(_("requirement '%s' not supported") % r)
75
75
76 self.sharedpath = self.path
76 self.sharedpath = self.path
77 try:
77 try:
78 s = os.path.realpath(self.opener("sharedpath").read())
78 s = os.path.realpath(self.opener("sharedpath").read())
79 if not os.path.exists(s):
79 if not os.path.exists(s):
80 raise error.RepoError(
80 raise error.RepoError(
81 _('.hg/sharedpath points to nonexistent directory %s') % s)
81 _('.hg/sharedpath points to nonexistent directory %s') % s)
82 self.sharedpath = s
82 self.sharedpath = s
83 except IOError, inst:
83 except IOError, inst:
84 if inst.errno != errno.ENOENT:
84 if inst.errno != errno.ENOENT:
85 raise
85 raise
86
86
87 self.store = store.store(requirements, self.sharedpath, util.opener)
87 self.store = store.store(requirements, self.sharedpath, util.opener)
88 self.spath = self.store.path
88 self.spath = self.store.path
89 self.sopener = self.store.opener
89 self.sopener = self.store.opener
90 self.sjoin = self.store.join
90 self.sjoin = self.store.join
91 self.opener.createmode = self.store.createmode
91 self.opener.createmode = self.store.createmode
92
92
93 # These two define the set of tags for this repository. _tags
93 # These two define the set of tags for this repository. _tags
94 # maps tag name to node; _tagtypes maps tag name to 'global' or
94 # maps tag name to node; _tagtypes maps tag name to 'global' or
95 # 'local'. (Global tags are defined by .hgtags across all
95 # 'local'. (Global tags are defined by .hgtags across all
96 # heads, and local tags are defined in .hg/localtags.) They
96 # heads, and local tags are defined in .hg/localtags.) They
97 # constitute the in-memory cache of tags.
97 # constitute the in-memory cache of tags.
98 self._tags = None
98 self._tags = None
99 self._tagtypes = None
99 self._tagtypes = None
100
100
101 self._branchcache = None # in UTF-8
101 self._branchcache = None # in UTF-8
102 self._branchcachetip = None
102 self._branchcachetip = None
103 self.nodetagscache = None
103 self.nodetagscache = None
104 self.filterpats = {}
104 self.filterpats = {}
105 self._datafilters = {}
105 self._datafilters = {}
106 self._transref = self._lockref = self._wlockref = None
106 self._transref = self._lockref = self._wlockref = None
107
107
108 @propertycache
108 @propertycache
109 def changelog(self):
109 def changelog(self):
110 c = changelog.changelog(self.sopener)
110 c = changelog.changelog(self.sopener)
111 if 'HG_PENDING' in os.environ:
111 if 'HG_PENDING' in os.environ:
112 p = os.environ['HG_PENDING']
112 p = os.environ['HG_PENDING']
113 if p.startswith(self.root):
113 if p.startswith(self.root):
114 c.readpending('00changelog.i.a')
114 c.readpending('00changelog.i.a')
115 self.sopener.defversion = c.version
115 self.sopener.defversion = c.version
116 return c
116 return c
117
117
118 @propertycache
118 @propertycache
119 def manifest(self):
119 def manifest(self):
120 return manifest.manifest(self.sopener)
120 return manifest.manifest(self.sopener)
121
121
122 @propertycache
122 @propertycache
123 def dirstate(self):
123 def dirstate(self):
124 return dirstate.dirstate(self.opener, self.ui, self.root)
124 return dirstate.dirstate(self.opener, self.ui, self.root)
125
125
126 def __getitem__(self, changeid):
126 def __getitem__(self, changeid):
127 if changeid is None:
127 if changeid is None:
128 return context.workingctx(self)
128 return context.workingctx(self)
129 return context.changectx(self, changeid)
129 return context.changectx(self, changeid)
130
130
131 def __nonzero__(self):
131 def __nonzero__(self):
132 return True
132 return True
133
133
134 def __len__(self):
134 def __len__(self):
135 return len(self.changelog)
135 return len(self.changelog)
136
136
137 def __iter__(self):
137 def __iter__(self):
138 for i in xrange(len(self)):
138 for i in xrange(len(self)):
139 yield i
139 yield i
140
140
141 def url(self):
141 def url(self):
142 return 'file:' + self.root
142 return 'file:' + self.root
143
143
144 def hook(self, name, throw=False, **args):
144 def hook(self, name, throw=False, **args):
145 return hook.hook(self.ui, self, name, throw, **args)
145 return hook.hook(self.ui, self, name, throw, **args)
146
146
147 tag_disallowed = ':\r\n'
147 tag_disallowed = ':\r\n'
148
148
149 def _tag(self, names, node, message, local, user, date, extra={}):
149 def _tag(self, names, node, message, local, user, date, extra={}):
150 if isinstance(names, str):
150 if isinstance(names, str):
151 allchars = names
151 allchars = names
152 names = (names,)
152 names = (names,)
153 else:
153 else:
154 allchars = ''.join(names)
154 allchars = ''.join(names)
155 for c in self.tag_disallowed:
155 for c in self.tag_disallowed:
156 if c in allchars:
156 if c in allchars:
157 raise util.Abort(_('%r cannot be used in a tag name') % c)
157 raise util.Abort(_('%r cannot be used in a tag name') % c)
158
158
159 for name in names:
159 for name in names:
160 self.hook('pretag', throw=True, node=hex(node), tag=name,
160 self.hook('pretag', throw=True, node=hex(node), tag=name,
161 local=local)
161 local=local)
162
162
163 def writetags(fp, names, munge, prevtags):
163 def writetags(fp, names, munge, prevtags):
164 fp.seek(0, 2)
164 fp.seek(0, 2)
165 if prevtags and prevtags[-1] != '\n':
165 if prevtags and prevtags[-1] != '\n':
166 fp.write('\n')
166 fp.write('\n')
167 for name in names:
167 for name in names:
168 m = munge and munge(name) or name
168 m = munge and munge(name) or name
169 if self._tagtypes and name in self._tagtypes:
169 if self._tagtypes and name in self._tagtypes:
170 old = self._tags.get(name, nullid)
170 old = self._tags.get(name, nullid)
171 fp.write('%s %s\n' % (hex(old), m))
171 fp.write('%s %s\n' % (hex(old), m))
172 fp.write('%s %s\n' % (hex(node), m))
172 fp.write('%s %s\n' % (hex(node), m))
173 fp.close()
173 fp.close()
174
174
175 prevtags = ''
175 prevtags = ''
176 if local:
176 if local:
177 try:
177 try:
178 fp = self.opener('localtags', 'r+')
178 fp = self.opener('localtags', 'r+')
179 except IOError:
179 except IOError:
180 fp = self.opener('localtags', 'a')
180 fp = self.opener('localtags', 'a')
181 else:
181 else:
182 prevtags = fp.read()
182 prevtags = fp.read()
183
183
184 # local tags are stored in the current charset
184 # local tags are stored in the current charset
185 writetags(fp, names, None, prevtags)
185 writetags(fp, names, None, prevtags)
186 for name in names:
186 for name in names:
187 self.hook('tag', node=hex(node), tag=name, local=local)
187 self.hook('tag', node=hex(node), tag=name, local=local)
188 return
188 return
189
189
190 try:
190 try:
191 fp = self.wfile('.hgtags', 'rb+')
191 fp = self.wfile('.hgtags', 'rb+')
192 except IOError:
192 except IOError:
193 fp = self.wfile('.hgtags', 'ab')
193 fp = self.wfile('.hgtags', 'ab')
194 else:
194 else:
195 prevtags = fp.read()
195 prevtags = fp.read()
196
196
197 # committed tags are stored in UTF-8
197 # committed tags are stored in UTF-8
198 writetags(fp, names, encoding.fromlocal, prevtags)
198 writetags(fp, names, encoding.fromlocal, prevtags)
199
199
200 if '.hgtags' not in self.dirstate:
200 if '.hgtags' not in self.dirstate:
201 self.add(['.hgtags'])
201 self.add(['.hgtags'])
202
202
203 m = match_.exact(self.root, '', ['.hgtags'])
203 m = match_.exact(self.root, '', ['.hgtags'])
204 tagnode = self.commit(message, user, date, extra=extra, match=m)
204 tagnode = self.commit(message, user, date, extra=extra, match=m)
205
205
206 for name in names:
206 for name in names:
207 self.hook('tag', node=hex(node), tag=name, local=local)
207 self.hook('tag', node=hex(node), tag=name, local=local)
208
208
209 return tagnode
209 return tagnode
210
210
211 def tag(self, names, node, message, local, user, date):
211 def tag(self, names, node, message, local, user, date):
212 '''tag a revision with one or more symbolic names.
212 '''tag a revision with one or more symbolic names.
213
213
214 names is a list of strings or, when adding a single tag, names may be a
214 names is a list of strings or, when adding a single tag, names may be a
215 string.
215 string.
216
216
217 if local is True, the tags are stored in a per-repository file.
217 if local is True, the tags are stored in a per-repository file.
218 otherwise, they are stored in the .hgtags file, and a new
218 otherwise, they are stored in the .hgtags file, and a new
219 changeset is committed with the change.
219 changeset is committed with the change.
220
220
221 keyword arguments:
221 keyword arguments:
222
222
223 local: whether to store tags in non-version-controlled file
223 local: whether to store tags in non-version-controlled file
224 (default False)
224 (default False)
225
225
226 message: commit message to use if committing
226 message: commit message to use if committing
227
227
228 user: name of user to use if committing
228 user: name of user to use if committing
229
229
230 date: date tuple to use if committing'''
230 date: date tuple to use if committing'''
231
231
232 for x in self.status()[:5]:
232 for x in self.status()[:5]:
233 if '.hgtags' in x:
233 if '.hgtags' in x:
234 raise util.Abort(_('working copy of .hgtags is changed '
234 raise util.Abort(_('working copy of .hgtags is changed '
235 '(please commit .hgtags manually)'))
235 '(please commit .hgtags manually)'))
236
236
237 self.tags() # instantiate the cache
237 self.tags() # instantiate the cache
238 self._tag(names, node, message, local, user, date)
238 self._tag(names, node, message, local, user, date)
239
239
240 def tags(self):
240 def tags(self):
241 '''return a mapping of tag to node'''
241 '''return a mapping of tag to node'''
242 if self._tags is None:
242 if self._tags is None:
243 (self._tags, self._tagtypes) = self._findtags()
243 (self._tags, self._tagtypes) = self._findtags()
244
244
245 return self._tags
245 return self._tags
246
246
247 def _findtags(self):
247 def _findtags(self):
248 '''Do the hard work of finding tags. Return a pair of dicts
248 '''Do the hard work of finding tags. Return a pair of dicts
249 (tags, tagtypes) where tags maps tag name to node, and tagtypes
249 (tags, tagtypes) where tags maps tag name to node, and tagtypes
250 maps tag name to a string like \'global\' or \'local\'.
250 maps tag name to a string like \'global\' or \'local\'.
251 Subclasses or extensions are free to add their own tags, but
251 Subclasses or extensions are free to add their own tags, but
252 should be aware that the returned dicts will be retained for the
252 should be aware that the returned dicts will be retained for the
253 duration of the localrepo object.'''
253 duration of the localrepo object.'''
254
254
255 # XXX what tagtype should subclasses/extensions use? Currently
255 # XXX what tagtype should subclasses/extensions use? Currently
256 # mq and bookmarks add tags, but do not set the tagtype at all.
256 # mq and bookmarks add tags, but do not set the tagtype at all.
257 # Should each extension invent its own tag type? Should there
257 # Should each extension invent its own tag type? Should there
258 # be one tagtype for all such "virtual" tags? Or is the status
258 # be one tagtype for all such "virtual" tags? Or is the status
259 # quo fine?
259 # quo fine?
260
260
261 alltags = {} # map tag name to (node, hist)
261 alltags = {} # map tag name to (node, hist)
262 tagtypes = {}
262 tagtypes = {}
263
263
264 tags_.findglobaltags(self.ui, self, alltags, tagtypes)
264 tags_.findglobaltags(self.ui, self, alltags, tagtypes)
265 tags_.readlocaltags(self.ui, self, alltags, tagtypes)
265 tags_.readlocaltags(self.ui, self, alltags, tagtypes)
266
266
267 # Build the return dicts. Have to re-encode tag names because
267 # Build the return dicts. Have to re-encode tag names because
268 # the tags module always uses UTF-8 (in order not to lose info
268 # the tags module always uses UTF-8 (in order not to lose info
269 # writing to the cache), but the rest of Mercurial wants them in
269 # writing to the cache), but the rest of Mercurial wants them in
270 # local encoding.
270 # local encoding.
271 tags = {}
271 tags = {}
272 for (name, (node, hist)) in alltags.iteritems():
272 for (name, (node, hist)) in alltags.iteritems():
273 if node != nullid:
273 if node != nullid:
274 tags[encoding.tolocal(name)] = node
274 tags[encoding.tolocal(name)] = node
275 tags['tip'] = self.changelog.tip()
275 tags['tip'] = self.changelog.tip()
276 tagtypes = dict([(encoding.tolocal(name), value)
276 tagtypes = dict([(encoding.tolocal(name), value)
277 for (name, value) in tagtypes.iteritems()])
277 for (name, value) in tagtypes.iteritems()])
278 return (tags, tagtypes)
278 return (tags, tagtypes)
279
279
280 def tagtype(self, tagname):
280 def tagtype(self, tagname):
281 '''
281 '''
282 return the type of the given tag. result can be:
282 return the type of the given tag. result can be:
283
283
284 'local' : a local tag
284 'local' : a local tag
285 'global' : a global tag
285 'global' : a global tag
286 None : tag does not exist
286 None : tag does not exist
287 '''
287 '''
288
288
289 self.tags()
289 self.tags()
290
290
291 return self._tagtypes.get(tagname)
291 return self._tagtypes.get(tagname)
292
292
293 def tagslist(self):
293 def tagslist(self):
294 '''return a list of tags ordered by revision'''
294 '''return a list of tags ordered by revision'''
295 l = []
295 l = []
296 for t, n in self.tags().iteritems():
296 for t, n in self.tags().iteritems():
297 try:
297 try:
298 r = self.changelog.rev(n)
298 r = self.changelog.rev(n)
299 except:
299 except:
300 r = -2 # sort to the beginning of the list if unknown
300 r = -2 # sort to the beginning of the list if unknown
301 l.append((r, t, n))
301 l.append((r, t, n))
302 return [(t, n) for r, t, n in sorted(l)]
302 return [(t, n) for r, t, n in sorted(l)]
303
303
304 def nodetags(self, node):
304 def nodetags(self, node):
305 '''return the tags associated with a node'''
305 '''return the tags associated with a node'''
306 if not self.nodetagscache:
306 if not self.nodetagscache:
307 self.nodetagscache = {}
307 self.nodetagscache = {}
308 for t, n in self.tags().iteritems():
308 for t, n in self.tags().iteritems():
309 self.nodetagscache.setdefault(n, []).append(t)
309 self.nodetagscache.setdefault(n, []).append(t)
310 return self.nodetagscache.get(node, [])
310 return self.nodetagscache.get(node, [])
311
311
312 def _branchtags(self, partial, lrev):
312 def _branchtags(self, partial, lrev):
313 # TODO: rename this function?
313 # TODO: rename this function?
314 tiprev = len(self) - 1
314 tiprev = len(self) - 1
315 if lrev != tiprev:
315 if lrev != tiprev:
316 self._updatebranchcache(partial, lrev+1, tiprev+1)
316 self._updatebranchcache(partial, lrev+1, tiprev+1)
317 self._writebranchcache(partial, self.changelog.tip(), tiprev)
317 self._writebranchcache(partial, self.changelog.tip(), tiprev)
318
318
319 return partial
319 return partial
320
320
321 def branchmap(self):
321 def branchmap(self):
322 tip = self.changelog.tip()
322 tip = self.changelog.tip()
323 if self._branchcache is not None and self._branchcachetip == tip:
323 if self._branchcache is not None and self._branchcachetip == tip:
324 return self._branchcache
324 return self._branchcache
325
325
326 oldtip = self._branchcachetip
326 oldtip = self._branchcachetip
327 self._branchcachetip = tip
327 self._branchcachetip = tip
328 if oldtip is None or oldtip not in self.changelog.nodemap:
328 if oldtip is None or oldtip not in self.changelog.nodemap:
329 partial, last, lrev = self._readbranchcache()
329 partial, last, lrev = self._readbranchcache()
330 else:
330 else:
331 lrev = self.changelog.rev(oldtip)
331 lrev = self.changelog.rev(oldtip)
332 partial = self._branchcache
332 partial = self._branchcache
333
333
334 self._branchtags(partial, lrev)
334 self._branchtags(partial, lrev)
335 # this private cache holds all heads (not just tips)
335 # this private cache holds all heads (not just tips)
336 self._branchcache = partial
336 self._branchcache = partial
337
337
338 return self._branchcache
338 return self._branchcache
339
339
340 def branchtags(self):
340 def branchtags(self):
341 '''return a dict where branch names map to the tipmost head of
341 '''return a dict where branch names map to the tipmost head of
342 the branch, open heads come before closed'''
342 the branch, open heads come before closed'''
343 bt = {}
343 bt = {}
344 for bn, heads in self.branchmap().iteritems():
344 for bn, heads in self.branchmap().iteritems():
345 head = None
345 head = None
346 for i in range(len(heads)-1, -1, -1):
346 for i in range(len(heads)-1, -1, -1):
347 h = heads[i]
347 h = heads[i]
348 if 'close' not in self.changelog.read(h)[5]:
348 if 'close' not in self.changelog.read(h)[5]:
349 head = h
349 head = h
350 break
350 break
351 # no open heads were found
351 # no open heads were found
352 if head is None:
352 if head is None:
353 head = heads[-1]
353 head = heads[-1]
354 bt[bn] = head
354 bt[bn] = head
355 return bt
355 return bt
356
356
357
357
358 def _readbranchcache(self):
358 def _readbranchcache(self):
359 partial = {}
359 partial = {}
360 try:
360 try:
361 f = self.opener("branchheads.cache")
361 f = self.opener("branchheads.cache")
362 lines = f.read().split('\n')
362 lines = f.read().split('\n')
363 f.close()
363 f.close()
364 except (IOError, OSError):
364 except (IOError, OSError):
365 return {}, nullid, nullrev
365 return {}, nullid, nullrev
366
366
367 try:
367 try:
368 last, lrev = lines.pop(0).split(" ", 1)
368 last, lrev = lines.pop(0).split(" ", 1)
369 last, lrev = bin(last), int(lrev)
369 last, lrev = bin(last), int(lrev)
370 if lrev >= len(self) or self[lrev].node() != last:
370 if lrev >= len(self) or self[lrev].node() != last:
371 # invalidate the cache
371 # invalidate the cache
372 raise ValueError('invalidating branch cache (tip differs)')
372 raise ValueError('invalidating branch cache (tip differs)')
373 for l in lines:
373 for l in lines:
374 if not l: continue
374 if not l: continue
375 node, label = l.split(" ", 1)
375 node, label = l.split(" ", 1)
376 partial.setdefault(label.strip(), []).append(bin(node))
376 partial.setdefault(label.strip(), []).append(bin(node))
377 except KeyboardInterrupt:
377 except KeyboardInterrupt:
378 raise
378 raise
379 except Exception, inst:
379 except Exception, inst:
380 if self.ui.debugflag:
380 if self.ui.debugflag:
381 self.ui.warn(str(inst), '\n')
381 self.ui.warn(str(inst), '\n')
382 partial, last, lrev = {}, nullid, nullrev
382 partial, last, lrev = {}, nullid, nullrev
383 return partial, last, lrev
383 return partial, last, lrev
384
384
385 def _writebranchcache(self, branches, tip, tiprev):
385 def _writebranchcache(self, branches, tip, tiprev):
386 try:
386 try:
387 f = self.opener("branchheads.cache", "w", atomictemp=True)
387 f = self.opener("branchheads.cache", "w", atomictemp=True)
388 f.write("%s %s\n" % (hex(tip), tiprev))
388 f.write("%s %s\n" % (hex(tip), tiprev))
389 for label, nodes in branches.iteritems():
389 for label, nodes in branches.iteritems():
390 for node in nodes:
390 for node in nodes:
391 f.write("%s %s\n" % (hex(node), label))
391 f.write("%s %s\n" % (hex(node), label))
392 f.rename()
392 f.rename()
393 except (IOError, OSError):
393 except (IOError, OSError):
394 pass
394 pass
395
395
396 def _updatebranchcache(self, partial, start, end):
396 def _updatebranchcache(self, partial, start, end):
397 # collect new branch entries
397 # collect new branch entries
398 newbranches = {}
398 newbranches = {}
399 for r in xrange(start, end):
399 for r in xrange(start, end):
400 c = self[r]
400 c = self[r]
401 newbranches.setdefault(c.branch(), []).append(c.node())
401 newbranches.setdefault(c.branch(), []).append(c.node())
402 # if older branchheads are reachable from new ones, they aren't
402 # if older branchheads are reachable from new ones, they aren't
403 # really branchheads. Note checking parents is insufficient:
403 # really branchheads. Note checking parents is insufficient:
404 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
404 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
405 for branch, newnodes in newbranches.iteritems():
405 for branch, newnodes in newbranches.iteritems():
406 bheads = partial.setdefault(branch, [])
406 bheads = partial.setdefault(branch, [])
407 bheads.extend(newnodes)
407 bheads.extend(newnodes)
408 if len(bheads) < 2:
408 if len(bheads) < 2:
409 continue
409 continue
410 newbheads = []
410 newbheads = []
411 # starting from tip means fewer passes over reachable
411 # starting from tip means fewer passes over reachable
412 while newnodes:
412 while newnodes:
413 latest = newnodes.pop()
413 latest = newnodes.pop()
414 if latest not in bheads:
414 if latest not in bheads:
415 continue
415 continue
416 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
416 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
417 reachable = self.changelog.reachable(latest, minbhrev)
417 reachable = self.changelog.reachable(latest, minbhrev)
418 bheads = [b for b in bheads if b not in reachable]
418 bheads = [b for b in bheads if b not in reachable]
419 newbheads.insert(0, latest)
419 newbheads.insert(0, latest)
420 bheads.extend(newbheads)
420 bheads.extend(newbheads)
421 partial[branch] = bheads
421 partial[branch] = bheads
422
422
423 def lookup(self, key):
423 def lookup(self, key):
424 if isinstance(key, int):
424 if isinstance(key, int):
425 return self.changelog.node(key)
425 return self.changelog.node(key)
426 elif key == '.':
426 elif key == '.':
427 return self.dirstate.parents()[0]
427 return self.dirstate.parents()[0]
428 elif key == 'null':
428 elif key == 'null':
429 return nullid
429 return nullid
430 elif key == 'tip':
430 elif key == 'tip':
431 return self.changelog.tip()
431 return self.changelog.tip()
432 n = self.changelog._match(key)
432 n = self.changelog._match(key)
433 if n:
433 if n:
434 return n
434 return n
435 if key in self.tags():
435 if key in self.tags():
436 return self.tags()[key]
436 return self.tags()[key]
437 if key in self.branchtags():
437 if key in self.branchtags():
438 return self.branchtags()[key]
438 return self.branchtags()[key]
439 n = self.changelog._partialmatch(key)
439 n = self.changelog._partialmatch(key)
440 if n:
440 if n:
441 return n
441 return n
442
442
443 # can't find key, check if it might have come from damaged dirstate
443 # can't find key, check if it might have come from damaged dirstate
444 if key in self.dirstate.parents():
444 if key in self.dirstate.parents():
445 raise error.Abort(_("working directory has unknown parent '%s'!")
445 raise error.Abort(_("working directory has unknown parent '%s'!")
446 % short(key))
446 % short(key))
447 try:
447 try:
448 if len(key) == 20:
448 if len(key) == 20:
449 key = hex(key)
449 key = hex(key)
450 except:
450 except:
451 pass
451 pass
452 raise error.RepoLookupError(_("unknown revision '%s'") % key)
452 raise error.RepoLookupError(_("unknown revision '%s'") % key)
453
453
454 def local(self):
454 def local(self):
455 return True
455 return True
456
456
457 def join(self, f):
457 def join(self, f):
458 return os.path.join(self.path, f)
458 return os.path.join(self.path, f)
459
459
460 def wjoin(self, f):
460 def wjoin(self, f):
461 return os.path.join(self.root, f)
461 return os.path.join(self.root, f)
462
462
463 def rjoin(self, f):
463 def rjoin(self, f):
464 return os.path.join(self.root, util.pconvert(f))
464 return os.path.join(self.root, util.pconvert(f))
465
465
466 def file(self, f):
466 def file(self, f):
467 if f[0] == '/':
467 if f[0] == '/':
468 f = f[1:]
468 f = f[1:]
469 return filelog.filelog(self.sopener, f)
469 return filelog.filelog(self.sopener, f)
470
470
471 def changectx(self, changeid):
471 def changectx(self, changeid):
472 return self[changeid]
472 return self[changeid]
473
473
474 def parents(self, changeid=None):
474 def parents(self, changeid=None):
475 '''get list of changectxs for parents of changeid'''
475 '''get list of changectxs for parents of changeid'''
476 return self[changeid].parents()
476 return self[changeid].parents()
477
477
478 def filectx(self, path, changeid=None, fileid=None):
478 def filectx(self, path, changeid=None, fileid=None):
479 """changeid can be a changeset revision, node, or tag.
479 """changeid can be a changeset revision, node, or tag.
480 fileid can be a file revision or node."""
480 fileid can be a file revision or node."""
481 return context.filectx(self, path, changeid, fileid)
481 return context.filectx(self, path, changeid, fileid)
482
482
483 def getcwd(self):
483 def getcwd(self):
484 return self.dirstate.getcwd()
484 return self.dirstate.getcwd()
485
485
486 def pathto(self, f, cwd=None):
486 def pathto(self, f, cwd=None):
487 return self.dirstate.pathto(f, cwd)
487 return self.dirstate.pathto(f, cwd)
488
488
489 def wfile(self, f, mode='r'):
489 def wfile(self, f, mode='r'):
490 return self.wopener(f, mode)
490 return self.wopener(f, mode)
491
491
492 def _link(self, f):
492 def _link(self, f):
493 return os.path.islink(self.wjoin(f))
493 return os.path.islink(self.wjoin(f))
494
494
495 def _filter(self, filter, filename, data):
495 def _filter(self, filter, filename, data):
496 if filter not in self.filterpats:
496 if filter not in self.filterpats:
497 l = []
497 l = []
498 for pat, cmd in self.ui.configitems(filter):
498 for pat, cmd in self.ui.configitems(filter):
499 if cmd == '!':
499 if cmd == '!':
500 continue
500 continue
501 mf = match_.match(self.root, '', [pat])
501 mf = match_.match(self.root, '', [pat])
502 fn = None
502 fn = None
503 params = cmd
503 params = cmd
504 for name, filterfn in self._datafilters.iteritems():
504 for name, filterfn in self._datafilters.iteritems():
505 if cmd.startswith(name):
505 if cmd.startswith(name):
506 fn = filterfn
506 fn = filterfn
507 params = cmd[len(name):].lstrip()
507 params = cmd[len(name):].lstrip()
508 break
508 break
509 if not fn:
509 if not fn:
510 fn = lambda s, c, **kwargs: util.filter(s, c)
510 fn = lambda s, c, **kwargs: util.filter(s, c)
511 # Wrap old filters not supporting keyword arguments
511 # Wrap old filters not supporting keyword arguments
512 if not inspect.getargspec(fn)[2]:
512 if not inspect.getargspec(fn)[2]:
513 oldfn = fn
513 oldfn = fn
514 fn = lambda s, c, **kwargs: oldfn(s, c)
514 fn = lambda s, c, **kwargs: oldfn(s, c)
515 l.append((mf, fn, params))
515 l.append((mf, fn, params))
516 self.filterpats[filter] = l
516 self.filterpats[filter] = l
517
517
518 for mf, fn, cmd in self.filterpats[filter]:
518 for mf, fn, cmd in self.filterpats[filter]:
519 if mf(filename):
519 if mf(filename):
520 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
520 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
521 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
521 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
522 break
522 break
523
523
524 return data
524 return data
525
525
526 def adddatafilter(self, name, filter):
526 def adddatafilter(self, name, filter):
527 self._datafilters[name] = filter
527 self._datafilters[name] = filter
528
528
529 def wread(self, filename):
529 def wread(self, filename):
530 if self._link(filename):
530 if self._link(filename):
531 data = os.readlink(self.wjoin(filename))
531 data = os.readlink(self.wjoin(filename))
532 else:
532 else:
533 data = self.wopener(filename, 'r').read()
533 data = self.wopener(filename, 'r').read()
534 return self._filter("encode", filename, data)
534 return self._filter("encode", filename, data)
535
535
536 def wwrite(self, filename, data, flags):
536 def wwrite(self, filename, data, flags):
537 data = self._filter("decode", filename, data)
537 data = self._filter("decode", filename, data)
538 try:
538 try:
539 os.unlink(self.wjoin(filename))
539 os.unlink(self.wjoin(filename))
540 except OSError:
540 except OSError:
541 pass
541 pass
542 if 'l' in flags:
542 if 'l' in flags:
543 self.wopener.symlink(data, filename)
543 self.wopener.symlink(data, filename)
544 else:
544 else:
545 self.wopener(filename, 'w').write(data)
545 self.wopener(filename, 'w').write(data)
546 if 'x' in flags:
546 if 'x' in flags:
547 util.set_flags(self.wjoin(filename), False, True)
547 util.set_flags(self.wjoin(filename), False, True)
548
548
549 def wwritedata(self, filename, data):
549 def wwritedata(self, filename, data):
550 return self._filter("decode", filename, data)
550 return self._filter("decode", filename, data)
551
551
552 def transaction(self):
552 def transaction(self):
553 tr = self._transref and self._transref() or None
553 tr = self._transref and self._transref() or None
554 if tr and tr.running():
554 if tr and tr.running():
555 return tr.nest()
555 return tr.nest()
556
556
557 # abort here if the journal already exists
557 # abort here if the journal already exists
558 if os.path.exists(self.sjoin("journal")):
558 if os.path.exists(self.sjoin("journal")):
559 raise error.RepoError(_("abandoned transaction found - run hg recover"))
559 raise error.RepoError(_("abandoned transaction found - run hg recover"))
560
560
561 # save dirstate for rollback
561 # save dirstate for rollback
562 try:
562 try:
563 ds = self.opener("dirstate").read()
563 ds = self.opener("dirstate").read()
564 except IOError:
564 except IOError:
565 ds = ""
565 ds = ""
566 self.opener("journal.dirstate", "w").write(ds)
566 self.opener("journal.dirstate", "w").write(ds)
567 self.opener("journal.branch", "w").write(self.dirstate.branch())
567 self.opener("journal.branch", "w").write(self.dirstate.branch())
568
568
569 renames = [(self.sjoin("journal"), self.sjoin("undo")),
569 renames = [(self.sjoin("journal"), self.sjoin("undo")),
570 (self.join("journal.dirstate"), self.join("undo.dirstate")),
570 (self.join("journal.dirstate"), self.join("undo.dirstate")),
571 (self.join("journal.branch"), self.join("undo.branch"))]
571 (self.join("journal.branch"), self.join("undo.branch"))]
572 tr = transaction.transaction(self.ui.warn, self.sopener,
572 tr = transaction.transaction(self.ui.warn, self.sopener,
573 self.sjoin("journal"),
573 self.sjoin("journal"),
574 aftertrans(renames),
574 aftertrans(renames),
575 self.store.createmode)
575 self.store.createmode)
576 self._transref = weakref.ref(tr)
576 self._transref = weakref.ref(tr)
577 return tr
577 return tr
578
578
579 def recover(self):
579 def recover(self):
580 lock = self.lock()
580 lock = self.lock()
581 try:
581 try:
582 if os.path.exists(self.sjoin("journal")):
582 if os.path.exists(self.sjoin("journal")):
583 self.ui.status(_("rolling back interrupted transaction\n"))
583 self.ui.status(_("rolling back interrupted transaction\n"))
584 transaction.rollback(self.sopener, self.sjoin("journal"), self.ui.warn)
584 transaction.rollback(self.sopener, self.sjoin("journal"), self.ui.warn)
585 self.invalidate()
585 self.invalidate()
586 return True
586 return True
587 else:
587 else:
588 self.ui.warn(_("no interrupted transaction available\n"))
588 self.ui.warn(_("no interrupted transaction available\n"))
589 return False
589 return False
590 finally:
590 finally:
591 lock.release()
591 lock.release()
592
592
593 def rollback(self):
593 def rollback(self):
594 wlock = lock = None
594 wlock = lock = None
595 try:
595 try:
596 wlock = self.wlock()
596 wlock = self.wlock()
597 lock = self.lock()
597 lock = self.lock()
598 if os.path.exists(self.sjoin("undo")):
598 if os.path.exists(self.sjoin("undo")):
599 self.ui.status(_("rolling back last transaction\n"))
599 self.ui.status(_("rolling back last transaction\n"))
600 transaction.rollback(self.sopener, self.sjoin("undo"), self.ui.warn)
600 transaction.rollback(self.sopener, self.sjoin("undo"), self.ui.warn)
601 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
601 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
602 try:
602 try:
603 branch = self.opener("undo.branch").read()
603 branch = self.opener("undo.branch").read()
604 self.dirstate.setbranch(branch)
604 self.dirstate.setbranch(branch)
605 except IOError:
605 except IOError:
606 self.ui.warn(_("Named branch could not be reset, "
606 self.ui.warn(_("Named branch could not be reset, "
607 "current branch still is: %s\n")
607 "current branch still is: %s\n")
608 % encoding.tolocal(self.dirstate.branch()))
608 % encoding.tolocal(self.dirstate.branch()))
609 self.invalidate()
609 self.invalidate()
610 self.dirstate.invalidate()
610 self.dirstate.invalidate()
611 self.destroyed()
611 self.destroyed()
612 else:
612 else:
613 self.ui.warn(_("no rollback information available\n"))
613 self.ui.warn(_("no rollback information available\n"))
614 finally:
614 finally:
615 release(lock, wlock)
615 release(lock, wlock)
616
616
617 def invalidate(self):
617 def invalidate(self):
618 for a in "changelog manifest".split():
618 for a in "changelog manifest".split():
619 if a in self.__dict__:
619 if a in self.__dict__:
620 delattr(self, a)
620 delattr(self, a)
621 self._tags = None
621 self._tags = None
622 self._tagtypes = None
622 self._tagtypes = None
623 self.nodetagscache = None
623 self.nodetagscache = None
624 self._branchcache = None # in UTF-8
624 self._branchcache = None # in UTF-8
625 self._branchcachetip = None
625 self._branchcachetip = None
626
626
627 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
627 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
628 try:
628 try:
629 l = lock.lock(lockname, 0, releasefn, desc=desc)
629 l = lock.lock(lockname, 0, releasefn, desc=desc)
630 except error.LockHeld, inst:
630 except error.LockHeld, inst:
631 if not wait:
631 if not wait:
632 raise
632 raise
633 self.ui.warn(_("waiting for lock on %s held by %r\n") %
633 self.ui.warn(_("waiting for lock on %s held by %r\n") %
634 (desc, inst.locker))
634 (desc, inst.locker))
635 # default to 600 seconds timeout
635 # default to 600 seconds timeout
636 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
636 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
637 releasefn, desc=desc)
637 releasefn, desc=desc)
638 if acquirefn:
638 if acquirefn:
639 acquirefn()
639 acquirefn()
640 return l
640 return l
641
641
642 def lock(self, wait=True):
642 def lock(self, wait=True):
643 '''Lock the repository store (.hg/store) and return a weak reference
643 '''Lock the repository store (.hg/store) and return a weak reference
644 to the lock. Use this before modifying the store (e.g. committing or
644 to the lock. Use this before modifying the store (e.g. committing or
645 stripping). If you are opening a transaction, get a lock as well.)'''
645 stripping). If you are opening a transaction, get a lock as well.)'''
646 l = self._lockref and self._lockref()
646 l = self._lockref and self._lockref()
647 if l is not None and l.held:
647 if l is not None and l.held:
648 l.lock()
648 l.lock()
649 return l
649 return l
650
650
651 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
651 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
652 _('repository %s') % self.origroot)
652 _('repository %s') % self.origroot)
653 self._lockref = weakref.ref(l)
653 self._lockref = weakref.ref(l)
654 return l
654 return l
655
655
656 def wlock(self, wait=True):
656 def wlock(self, wait=True):
657 '''Lock the non-store parts of the repository (everything under
657 '''Lock the non-store parts of the repository (everything under
658 .hg except .hg/store) and return a weak reference to the lock.
658 .hg except .hg/store) and return a weak reference to the lock.
659 Use this before modifying files in .hg.'''
659 Use this before modifying files in .hg.'''
660 l = self._wlockref and self._wlockref()
660 l = self._wlockref and self._wlockref()
661 if l is not None and l.held:
661 if l is not None and l.held:
662 l.lock()
662 l.lock()
663 return l
663 return l
664
664
665 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
665 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
666 self.dirstate.invalidate, _('working directory of %s') %
666 self.dirstate.invalidate, _('working directory of %s') %
667 self.origroot)
667 self.origroot)
668 self._wlockref = weakref.ref(l)
668 self._wlockref = weakref.ref(l)
669 return l
669 return l
670
670
671 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
671 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
672 """
672 """
673 commit an individual file as part of a larger transaction
673 commit an individual file as part of a larger transaction
674 """
674 """
675
675
676 fname = fctx.path()
676 fname = fctx.path()
677 text = fctx.data()
677 text = fctx.data()
678 flog = self.file(fname)
678 flog = self.file(fname)
679 fparent1 = manifest1.get(fname, nullid)
679 fparent1 = manifest1.get(fname, nullid)
680 fparent2 = fparent2o = manifest2.get(fname, nullid)
680 fparent2 = fparent2o = manifest2.get(fname, nullid)
681
681
682 meta = {}
682 meta = {}
683 copy = fctx.renamed()
683 copy = fctx.renamed()
684 if copy and copy[0] != fname:
684 if copy and copy[0] != fname:
685 # Mark the new revision of this file as a copy of another
685 # Mark the new revision of this file as a copy of another
686 # file. This copy data will effectively act as a parent
686 # file. This copy data will effectively act as a parent
687 # of this new revision. If this is a merge, the first
687 # of this new revision. If this is a merge, the first
688 # parent will be the nullid (meaning "look up the copy data")
688 # parent will be the nullid (meaning "look up the copy data")
689 # and the second one will be the other parent. For example:
689 # and the second one will be the other parent. For example:
690 #
690 #
691 # 0 --- 1 --- 3 rev1 changes file foo
691 # 0 --- 1 --- 3 rev1 changes file foo
692 # \ / rev2 renames foo to bar and changes it
692 # \ / rev2 renames foo to bar and changes it
693 # \- 2 -/ rev3 should have bar with all changes and
693 # \- 2 -/ rev3 should have bar with all changes and
694 # should record that bar descends from
694 # should record that bar descends from
695 # bar in rev2 and foo in rev1
695 # bar in rev2 and foo in rev1
696 #
696 #
697 # this allows this merge to succeed:
697 # this allows this merge to succeed:
698 #
698 #
699 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
699 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
700 # \ / merging rev3 and rev4 should use bar@rev2
700 # \ / merging rev3 and rev4 should use bar@rev2
701 # \- 2 --- 4 as the merge base
701 # \- 2 --- 4 as the merge base
702 #
702 #
703
703
704 cfname = copy[0]
704 cfname = copy[0]
705 crev = manifest1.get(cfname)
705 crev = manifest1.get(cfname)
706 newfparent = fparent2
706 newfparent = fparent2
707
707
708 if manifest2: # branch merge
708 if manifest2: # branch merge
709 if fparent2 == nullid or crev is None: # copied on remote side
709 if fparent2 == nullid or crev is None: # copied on remote side
710 if cfname in manifest2:
710 if cfname in manifest2:
711 crev = manifest2[cfname]
711 crev = manifest2[cfname]
712 newfparent = fparent1
712 newfparent = fparent1
713
713
714 # find source in nearest ancestor if we've lost track
714 # find source in nearest ancestor if we've lost track
715 if not crev:
715 if not crev:
716 self.ui.debug(" %s: searching for copy revision for %s\n" %
716 self.ui.debug(" %s: searching for copy revision for %s\n" %
717 (fname, cfname))
717 (fname, cfname))
718 for ancestor in self['.'].ancestors():
718 for ancestor in self['.'].ancestors():
719 if cfname in ancestor:
719 if cfname in ancestor:
720 crev = ancestor[cfname].filenode()
720 crev = ancestor[cfname].filenode()
721 break
721 break
722
722
723 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
723 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
724 meta["copy"] = cfname
724 meta["copy"] = cfname
725 meta["copyrev"] = hex(crev)
725 meta["copyrev"] = hex(crev)
726 fparent1, fparent2 = nullid, newfparent
726 fparent1, fparent2 = nullid, newfparent
727 elif fparent2 != nullid:
727 elif fparent2 != nullid:
728 # is one parent an ancestor of the other?
728 # is one parent an ancestor of the other?
729 fparentancestor = flog.ancestor(fparent1, fparent2)
729 fparentancestor = flog.ancestor(fparent1, fparent2)
730 if fparentancestor == fparent1:
730 if fparentancestor == fparent1:
731 fparent1, fparent2 = fparent2, nullid
731 fparent1, fparent2 = fparent2, nullid
732 elif fparentancestor == fparent2:
732 elif fparentancestor == fparent2:
733 fparent2 = nullid
733 fparent2 = nullid
734
734
735 # is the file changed?
735 # is the file changed?
736 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
736 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
737 changelist.append(fname)
737 changelist.append(fname)
738 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
738 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
739
739
740 # are just the flags changed during merge?
740 # are just the flags changed during merge?
741 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
741 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
742 changelist.append(fname)
742 changelist.append(fname)
743
743
744 return fparent1
744 return fparent1
745
745
746 def commit(self, text="", user=None, date=None, match=None, force=False,
746 def commit(self, text="", user=None, date=None, match=None, force=False,
747 editor=False, extra={}):
747 editor=False, extra={}):
748 """Add a new revision to current repository.
748 """Add a new revision to current repository.
749
749
750 Revision information is gathered from the working directory,
750 Revision information is gathered from the working directory,
751 match can be used to filter the committed files. If editor is
751 match can be used to filter the committed files. If editor is
752 supplied, it is called to get a commit message.
752 supplied, it is called to get a commit message.
753 """
753 """
754
754
755 def fail(f, msg):
755 def fail(f, msg):
756 raise util.Abort('%s: %s' % (f, msg))
756 raise util.Abort('%s: %s' % (f, msg))
757
757
758 if not match:
758 if not match:
759 match = match_.always(self.root, '')
759 match = match_.always(self.root, '')
760
760
761 if not force:
761 if not force:
762 vdirs = []
762 vdirs = []
763 match.dir = vdirs.append
763 match.dir = vdirs.append
764 match.bad = fail
764 match.bad = fail
765
765
766 wlock = self.wlock()
766 wlock = self.wlock()
767 try:
767 try:
768 p1, p2 = self.dirstate.parents()
768 p1, p2 = self.dirstate.parents()
769 wctx = self[None]
769 wctx = self[None]
770
770
771 if (not force and p2 != nullid and match and
771 if (not force and p2 != nullid and match and
772 (match.files() or match.anypats())):
772 (match.files() or match.anypats())):
773 raise util.Abort(_('cannot partially commit a merge '
773 raise util.Abort(_('cannot partially commit a merge '
774 '(do not specify files or patterns)'))
774 '(do not specify files or patterns)'))
775
775
776 changes = self.status(match=match, clean=force)
776 changes = self.status(match=match, clean=force)
777 if force:
777 if force:
778 changes[0].extend(changes[6]) # mq may commit unchanged files
778 changes[0].extend(changes[6]) # mq may commit unchanged files
779
779
780 # check subrepos
780 # check subrepos
781 subs = []
781 subs = []
782 for s in wctx.substate:
782 for s in wctx.substate:
783 if match(s) and wctx.sub(s).dirty():
783 if match(s) and wctx.sub(s).dirty():
784 subs.append(s)
784 subs.append(s)
785 if subs and '.hgsubstate' not in changes[0]:
785 if subs and '.hgsubstate' not in changes[0]:
786 changes[0].insert(0, '.hgsubstate')
786 changes[0].insert(0, '.hgsubstate')
787
787
788 # make sure all explicit patterns are matched
788 # make sure all explicit patterns are matched
789 if not force and match.files():
789 if not force and match.files():
790 matched = set(changes[0] + changes[1] + changes[2])
790 matched = set(changes[0] + changes[1] + changes[2])
791
791
792 for f in match.files():
792 for f in match.files():
793 if f == '.' or f in matched or f in wctx.substate:
793 if f == '.' or f in matched or f in wctx.substate:
794 continue
794 continue
795 if f in changes[3]: # missing
795 if f in changes[3]: # missing
796 fail(f, _('file not found!'))
796 fail(f, _('file not found!'))
797 if f in vdirs: # visited directory
797 if f in vdirs: # visited directory
798 d = f + '/'
798 d = f + '/'
799 for mf in matched:
799 for mf in matched:
800 if mf.startswith(d):
800 if mf.startswith(d):
801 break
801 break
802 else:
802 else:
803 fail(f, _("no match under directory!"))
803 fail(f, _("no match under directory!"))
804 elif f not in self.dirstate:
804 elif f not in self.dirstate:
805 fail(f, _("file not tracked!"))
805 fail(f, _("file not tracked!"))
806
806
807 if (not force and not extra.get("close") and p2 == nullid
807 if (not force and not extra.get("close") and p2 == nullid
808 and not (changes[0] or changes[1] or changes[2])
808 and not (changes[0] or changes[1] or changes[2])
809 and self[None].branch() == self['.'].branch()):
809 and self[None].branch() == self['.'].branch()):
810 return None
810 return None
811
811
812 ms = merge_.mergestate(self)
812 ms = merge_.mergestate(self)
813 for f in changes[0]:
813 for f in changes[0]:
814 if f in ms and ms[f] == 'u':
814 if f in ms and ms[f] == 'u':
815 raise util.Abort(_("unresolved merge conflicts "
815 raise util.Abort(_("unresolved merge conflicts "
816 "(see hg resolve)"))
816 "(see hg resolve)"))
817
817
818 cctx = context.workingctx(self, (p1, p2), text, user, date,
818 cctx = context.workingctx(self, (p1, p2), text, user, date,
819 extra, changes)
819 extra, changes)
820 if editor:
820 if editor:
821 cctx._text = editor(self, cctx, subs)
821 cctx._text = editor(self, cctx, subs)
822
822
823 # commit subs
823 # commit subs
824 if subs:
824 if subs:
825 state = wctx.substate.copy()
825 state = wctx.substate.copy()
826 for s in subs:
826 for s in subs:
827 self.ui.status(_('committing subrepository %s\n') % s)
827 self.ui.status(_('committing subrepository %s\n') % s)
828 sr = wctx.sub(s).commit(cctx._text, user, date)
828 sr = wctx.sub(s).commit(cctx._text, user, date)
829 state[s] = (state[s][0], sr)
829 state[s] = (state[s][0], sr)
830 subrepo.writestate(self, state)
830 subrepo.writestate(self, state)
831
831
832 ret = self.commitctx(cctx, True)
832 ret = self.commitctx(cctx, True)
833
833
834 # update dirstate and mergestate
834 # update dirstate and mergestate
835 for f in changes[0] + changes[1]:
835 for f in changes[0] + changes[1]:
836 self.dirstate.normal(f)
836 self.dirstate.normal(f)
837 for f in changes[2]:
837 for f in changes[2]:
838 self.dirstate.forget(f)
838 self.dirstate.forget(f)
839 self.dirstate.setparents(ret)
839 self.dirstate.setparents(ret)
840 ms.reset()
840 ms.reset()
841
841
842 return ret
842 return ret
843
843
844 finally:
844 finally:
845 wlock.release()
845 wlock.release()
846
846
847 def commitctx(self, ctx, error=False):
847 def commitctx(self, ctx, error=False):
848 """Add a new revision to current repository.
848 """Add a new revision to current repository.
849
849
850 Revision information is passed via the context argument.
850 Revision information is passed via the context argument.
851 """
851 """
852
852
853 tr = lock = None
853 tr = lock = None
854 removed = ctx.removed()
854 removed = ctx.removed()
855 p1, p2 = ctx.p1(), ctx.p2()
855 p1, p2 = ctx.p1(), ctx.p2()
856 m1 = p1.manifest().copy()
856 m1 = p1.manifest().copy()
857 m2 = p2.manifest()
857 m2 = p2.manifest()
858 user = ctx.user()
858 user = ctx.user()
859
859
860 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
860 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
861 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
861 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
862
862
863 lock = self.lock()
863 lock = self.lock()
864 try:
864 try:
865 tr = self.transaction()
865 tr = self.transaction()
866 trp = weakref.proxy(tr)
866 trp = weakref.proxy(tr)
867
867
868 # check in files
868 # check in files
869 new = {}
869 new = {}
870 changed = []
870 changed = []
871 linkrev = len(self)
871 linkrev = len(self)
872 for f in sorted(ctx.modified() + ctx.added()):
872 for f in sorted(ctx.modified() + ctx.added()):
873 self.ui.note(f + "\n")
873 self.ui.note(f + "\n")
874 try:
874 try:
875 fctx = ctx[f]
875 fctx = ctx[f]
876 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
876 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
877 changed)
877 changed)
878 m1.set(f, fctx.flags())
878 m1.set(f, fctx.flags())
879 -             except (OSError, IOError):
880 -                 if error:
881 -                     self.ui.warn(_("trouble committing %s!\n") % f)
882 -                     raise
883 -                 else:
884 -                     removed.append(f)
879 +             except OSError, inst:
880 +                 self.ui.warn(_("trouble committing %s!\n") % f)
881 +                 raise
882 +             except IOError, inst:
883 +                 errcode = getattr(inst, 'errno', errno.ENOENT)
884 +                 if error or errcode and errcode != errno.ENOENT:
885 +                     self.ui.warn(_("trouble committing %s!\n") % f)
886 +                     raise
887 +                 else:
888 +                     removed.append(f)
885
889
886 # update manifest
890 # update manifest
887 m1.update(new)
891 m1.update(new)
888 removed = [f for f in sorted(removed) if f in m1 or f in m2]
892 removed = [f for f in sorted(removed) if f in m1 or f in m2]
889 drop = [f for f in removed if f in m1]
893 drop = [f for f in removed if f in m1]
890 for f in drop:
894 for f in drop:
891 del m1[f]
895 del m1[f]
892 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
896 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
893 p2.manifestnode(), (new, drop))
897 p2.manifestnode(), (new, drop))
894
898
895 # update changelog
899 # update changelog
896 self.changelog.delayupdate()
900 self.changelog.delayupdate()
897 n = self.changelog.add(mn, changed + removed, ctx.description(),
901 n = self.changelog.add(mn, changed + removed, ctx.description(),
898 trp, p1.node(), p2.node(),
902 trp, p1.node(), p2.node(),
899 user, ctx.date(), ctx.extra().copy())
903 user, ctx.date(), ctx.extra().copy())
900 p = lambda: self.changelog.writepending() and self.root or ""
904 p = lambda: self.changelog.writepending() and self.root or ""
901 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
905 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
902 parent2=xp2, pending=p)
906 parent2=xp2, pending=p)
903 self.changelog.finalize(trp)
907 self.changelog.finalize(trp)
904 tr.close()
908 tr.close()
905
909
906 if self._branchcache:
910 if self._branchcache:
907 self.branchtags()
911 self.branchtags()
908
912
909 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
913 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
910 return n
914 return n
911 finally:
915 finally:
912 del tr
916 del tr
913 lock.release()
917 lock.release()
914
918
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated.  Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback.  That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        tags_.findglobaltags(self.ui, self, {}, {})

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)
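    # Editorial sketch (not part of the original file): walk() is usually
    # driven with a matcher built from the match module imported at the top
    # of this file, e.g.
    #
    #     m = match_.always(repo.root, repo.getcwd())
    #     for f in repo.walk(m):
    #         print f    # files matched in the working directory context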
941
945
942 def status(self, node1='.', node2=None, match=None,
946 def status(self, node1='.', node2=None, match=None,
943 ignored=False, clean=False, unknown=False):
947 ignored=False, clean=False, unknown=False):
944 """return status of files between two nodes or node and working directory
948 """return status of files between two nodes or node and working directory
945
949
946 If node1 is None, use the first dirstate parent instead.
950 If node1 is None, use the first dirstate parent instead.
947 If node2 is None, compare node1 with working directory.
951 If node2 is None, compare node1 with working directory.
948 """
952 """
949
953
950 def mfmatches(ctx):
954 def mfmatches(ctx):
951 mf = ctx.manifest().copy()
955 mf = ctx.manifest().copy()
952 for fn in mf.keys():
956 for fn in mf.keys():
953 if not match(fn):
957 if not match(fn):
954 del mf[fn]
958 del mf[fn]
955 return mf
959 return mf
956
960
957 if isinstance(node1, context.changectx):
961 if isinstance(node1, context.changectx):
958 ctx1 = node1
962 ctx1 = node1
959 else:
963 else:
960 ctx1 = self[node1]
964 ctx1 = self[node1]
961 if isinstance(node2, context.changectx):
965 if isinstance(node2, context.changectx):
962 ctx2 = node2
966 ctx2 = node2
963 else:
967 else:
964 ctx2 = self[node2]
968 ctx2 = self[node2]
965
969
966 working = ctx2.rev() is None
970 working = ctx2.rev() is None
967 parentworking = working and ctx1 == self['.']
971 parentworking = working and ctx1 == self['.']
968 match = match or match_.always(self.root, self.getcwd())
972 match = match or match_.always(self.root, self.getcwd())
969 listignored, listclean, listunknown = ignored, clean, unknown
973 listignored, listclean, listunknown = ignored, clean, unknown
970
974
971 # load earliest manifest first for caching reasons
975 # load earliest manifest first for caching reasons
972 if not working and ctx2.rev() < ctx1.rev():
976 if not working and ctx2.rev() < ctx1.rev():
973 ctx2.manifest()
977 ctx2.manifest()
974
978
975 if not parentworking:
979 if not parentworking:
976 def bad(f, msg):
980 def bad(f, msg):
977 if f not in ctx1:
981 if f not in ctx1:
978 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
982 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
979 match.bad = bad
983 match.bad = bad
980
984
981 if working: # we need to scan the working dir
985 if working: # we need to scan the working dir
982 s = self.dirstate.status(match, listignored, listclean, listunknown)
986 s = self.dirstate.status(match, listignored, listclean, listunknown)
983 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
987 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
984
988
985 # check for any possibly clean files
989 # check for any possibly clean files
986 if parentworking and cmp:
990 if parentworking and cmp:
987 fixup = []
991 fixup = []
988 # do a full compare of any files that might have changed
992 # do a full compare of any files that might have changed
989 for f in sorted(cmp):
993 for f in sorted(cmp):
990 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
994 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
991 or ctx1[f].cmp(ctx2[f].data())):
995 or ctx1[f].cmp(ctx2[f].data())):
992 modified.append(f)
996 modified.append(f)
993 else:
997 else:
994 fixup.append(f)
998 fixup.append(f)
995
999
996 if listclean:
1000 if listclean:
997 clean += fixup
1001 clean += fixup
998
1002
999 # update dirstate for files that are actually clean
1003 # update dirstate for files that are actually clean
1000 if fixup:
1004 if fixup:
1001 try:
1005 try:
1002 # updating the dirstate is optional
1006 # updating the dirstate is optional
1003 # so we don't wait on the lock
1007 # so we don't wait on the lock
1004 wlock = self.wlock(False)
1008 wlock = self.wlock(False)
1005 try:
1009 try:
1006 for f in fixup:
1010 for f in fixup:
1007 self.dirstate.normal(f)
1011 self.dirstate.normal(f)
1008 finally:
1012 finally:
1009 wlock.release()
1013 wlock.release()
1010 except error.LockError:
1014 except error.LockError:
1011 pass
1015 pass
1012
1016
1013 if not parentworking:
1017 if not parentworking:
1014 mf1 = mfmatches(ctx1)
1018 mf1 = mfmatches(ctx1)
1015 if working:
1019 if working:
1016 # we are comparing working dir against non-parent
1020 # we are comparing working dir against non-parent
1017 # generate a pseudo-manifest for the working dir
1021 # generate a pseudo-manifest for the working dir
1018 mf2 = mfmatches(self['.'])
1022 mf2 = mfmatches(self['.'])
1019 for f in cmp + modified + added:
1023 for f in cmp + modified + added:
1020 mf2[f] = None
1024 mf2[f] = None
1021 mf2.set(f, ctx2.flags(f))
1025 mf2.set(f, ctx2.flags(f))
1022 for f in removed:
1026 for f in removed:
1023 if f in mf2:
1027 if f in mf2:
1024 del mf2[f]
1028 del mf2[f]
1025 else:
1029 else:
1026 # we are comparing two revisions
1030 # we are comparing two revisions
1027 deleted, unknown, ignored = [], [], []
1031 deleted, unknown, ignored = [], [], []
1028 mf2 = mfmatches(ctx2)
1032 mf2 = mfmatches(ctx2)
1029
1033
1030 modified, added, clean = [], [], []
1034 modified, added, clean = [], [], []
1031 for fn in mf2:
1035 for fn in mf2:
1032 if fn in mf1:
1036 if fn in mf1:
1033 if (mf1.flags(fn) != mf2.flags(fn) or
1037 if (mf1.flags(fn) != mf2.flags(fn) or
1034 (mf1[fn] != mf2[fn] and
1038 (mf1[fn] != mf2[fn] and
1035 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1039 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1036 modified.append(fn)
1040 modified.append(fn)
1037 elif listclean:
1041 elif listclean:
1038 clean.append(fn)
1042 clean.append(fn)
1039 del mf1[fn]
1043 del mf1[fn]
1040 else:
1044 else:
1041 added.append(fn)
1045 added.append(fn)
1042 removed = mf1.keys()
1046 removed = mf1.keys()
1043
1047
1044 r = modified, added, removed, deleted, unknown, ignored, clean
1048 r = modified, added, removed, deleted, unknown, ignored, clean
1045 [l.sort() for l in r]
1049 [l.sort() for l in r]
1046 return r
1050 return r
1047
1051
1048 def add(self, list):
1052 def add(self, list):
1049 wlock = self.wlock()
1053 wlock = self.wlock()
1050 try:
1054 try:
1051 rejected = []
1055 rejected = []
1052 for f in list:
1056 for f in list:
1053 p = self.wjoin(f)
1057 p = self.wjoin(f)
1054 try:
1058 try:
1055 st = os.lstat(p)
1059 st = os.lstat(p)
1056 except:
1060 except:
1057 self.ui.warn(_("%s does not exist!\n") % f)
1061 self.ui.warn(_("%s does not exist!\n") % f)
1058 rejected.append(f)
1062 rejected.append(f)
1059 continue
1063 continue
1060 if st.st_size > 10000000:
1064 if st.st_size > 10000000:
1061 self.ui.warn(_("%s: files over 10MB may cause memory and"
1065 self.ui.warn(_("%s: files over 10MB may cause memory and"
1062 " performance problems\n"
1066 " performance problems\n"
1063 "(use 'hg revert %s' to unadd the file)\n")
1067 "(use 'hg revert %s' to unadd the file)\n")
1064 % (f, f))
1068 % (f, f))
1065 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1069 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1066 self.ui.warn(_("%s not added: only files and symlinks "
1070 self.ui.warn(_("%s not added: only files and symlinks "
1067 "supported currently\n") % f)
1071 "supported currently\n") % f)
1068 rejected.append(p)
1072 rejected.append(p)
1069 elif self.dirstate[f] in 'amn':
1073 elif self.dirstate[f] in 'amn':
1070 self.ui.warn(_("%s already tracked!\n") % f)
1074 self.ui.warn(_("%s already tracked!\n") % f)
1071 elif self.dirstate[f] == 'r':
1075 elif self.dirstate[f] == 'r':
1072 self.dirstate.normallookup(f)
1076 self.dirstate.normallookup(f)
1073 else:
1077 else:
1074 self.dirstate.add(f)
1078 self.dirstate.add(f)
1075 return rejected
1079 return rejected
1076 finally:
1080 finally:
1077 wlock.release()
1081 wlock.release()
1078
1082
1079 def forget(self, list):
1083 def forget(self, list):
1080 wlock = self.wlock()
1084 wlock = self.wlock()
1081 try:
1085 try:
1082 for f in list:
1086 for f in list:
1083 if self.dirstate[f] != 'a':
1087 if self.dirstate[f] != 'a':
1084 self.ui.warn(_("%s not added!\n") % f)
1088 self.ui.warn(_("%s not added!\n") % f)
1085 else:
1089 else:
1086 self.dirstate.forget(f)
1090 self.dirstate.forget(f)
1087 finally:
1091 finally:
1088 wlock.release()
1092 wlock.release()
1089
1093
1090 def remove(self, list, unlink=False):
1094 def remove(self, list, unlink=False):
1091 if unlink:
1095 if unlink:
1092 for f in list:
1096 for f in list:
1093 try:
1097 try:
1094 util.unlink(self.wjoin(f))
1098 util.unlink(self.wjoin(f))
1095 except OSError, inst:
1099 except OSError, inst:
1096 if inst.errno != errno.ENOENT:
1100 if inst.errno != errno.ENOENT:
1097 raise
1101 raise
1098 wlock = self.wlock()
1102 wlock = self.wlock()
1099 try:
1103 try:
1100 for f in list:
1104 for f in list:
1101 if unlink and os.path.exists(self.wjoin(f)):
1105 if unlink and os.path.exists(self.wjoin(f)):
1102 self.ui.warn(_("%s still exists!\n") % f)
1106 self.ui.warn(_("%s still exists!\n") % f)
1103 elif self.dirstate[f] == 'a':
1107 elif self.dirstate[f] == 'a':
1104 self.dirstate.forget(f)
1108 self.dirstate.forget(f)
1105 elif f not in self.dirstate:
1109 elif f not in self.dirstate:
1106 self.ui.warn(_("%s not tracked!\n") % f)
1110 self.ui.warn(_("%s not tracked!\n") % f)
1107 else:
1111 else:
1108 self.dirstate.remove(f)
1112 self.dirstate.remove(f)
1109 finally:
1113 finally:
1110 wlock.release()
1114 wlock.release()
1111
1115
1112 def undelete(self, list):
1116 def undelete(self, list):
1113 manifests = [self.manifest.read(self.changelog.read(p)[0])
1117 manifests = [self.manifest.read(self.changelog.read(p)[0])
1114 for p in self.dirstate.parents() if p != nullid]
1118 for p in self.dirstate.parents() if p != nullid]
1115 wlock = self.wlock()
1119 wlock = self.wlock()
1116 try:
1120 try:
1117 for f in list:
1121 for f in list:
1118 if self.dirstate[f] != 'r':
1122 if self.dirstate[f] != 'r':
1119 self.ui.warn(_("%s not removed!\n") % f)
1123 self.ui.warn(_("%s not removed!\n") % f)
1120 else:
1124 else:
1121 m = f in manifests[0] and manifests[0] or manifests[1]
1125 m = f in manifests[0] and manifests[0] or manifests[1]
1122 t = self.file(f).read(m[f])
1126 t = self.file(f).read(m[f])
1123 self.wwrite(f, t, m.flags(f))
1127 self.wwrite(f, t, m.flags(f))
1124 self.dirstate.normal(f)
1128 self.dirstate.normal(f)
1125 finally:
1129 finally:
1126 wlock.release()
1130 wlock.release()
1127
1131
1128 def copy(self, source, dest):
1132 def copy(self, source, dest):
1129 p = self.wjoin(dest)
1133 p = self.wjoin(dest)
1130 if not (os.path.exists(p) or os.path.islink(p)):
1134 if not (os.path.exists(p) or os.path.islink(p)):
1131 self.ui.warn(_("%s does not exist!\n") % dest)
1135 self.ui.warn(_("%s does not exist!\n") % dest)
1132 elif not (os.path.isfile(p) or os.path.islink(p)):
1136 elif not (os.path.isfile(p) or os.path.islink(p)):
1133 self.ui.warn(_("copy failed: %s is not a file or a "
1137 self.ui.warn(_("copy failed: %s is not a file or a "
1134 "symbolic link\n") % dest)
1138 "symbolic link\n") % dest)
1135 else:
1139 else:
1136 wlock = self.wlock()
1140 wlock = self.wlock()
1137 try:
1141 try:
1138 if self.dirstate[dest] in '?r':
1142 if self.dirstate[dest] in '?r':
1139 self.dirstate.add(dest)
1143 self.dirstate.add(dest)
1140 self.dirstate.copy(source, dest)
1144 self.dirstate.copy(source, dest)
1141 finally:
1145 finally:
1142 wlock.release()
1146 wlock.release()
1143
1147
1144 def heads(self, start=None):
1148 def heads(self, start=None):
1145 heads = self.changelog.heads(start)
1149 heads = self.changelog.heads(start)
1146 # sort the output in rev descending order
1150 # sort the output in rev descending order
1147 heads = [(-self.changelog.rev(h), h) for h in heads]
1151 heads = [(-self.changelog.rev(h), h) for h in heads]
1148 return [n for (r, n) in sorted(heads)]
1152 return [n for (r, n) in sorted(heads)]
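        # Editorial note (not part of the original file): negating the
        # revision number above lets a plain ascending sort return the
        # heads newest-first.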
1149
1153
1150 def branchheads(self, branch=None, start=None, closed=False):
1154 def branchheads(self, branch=None, start=None, closed=False):
1151 '''return a (possibly filtered) list of heads for the given branch
1155 '''return a (possibly filtered) list of heads for the given branch
1152
1156
1153 Heads are returned in topological order, from newest to oldest.
1157 Heads are returned in topological order, from newest to oldest.
1154 If branch is None, use the dirstate branch.
1158 If branch is None, use the dirstate branch.
1155 If start is not None, return only heads reachable from start.
1159 If start is not None, return only heads reachable from start.
1156 If closed is True, return heads that are marked as closed as well.
1160 If closed is True, return heads that are marked as closed as well.
1157 '''
1161 '''
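        # Editorial sketch (not part of the original file): e.g. walking the
        # open heads of the named branch "default", newest first:
        #
        #     for node in repo.branchheads('default'):
        #         print repo[node].rev()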
1158 if branch is None:
1162 if branch is None:
1159 branch = self[None].branch()
1163 branch = self[None].branch()
1160 branches = self.branchmap()
1164 branches = self.branchmap()
1161 if branch not in branches:
1165 if branch not in branches:
1162 return []
1166 return []
1163 # the cache returns heads ordered lowest to highest
1167 # the cache returns heads ordered lowest to highest
1164 bheads = list(reversed(branches[branch]))
1168 bheads = list(reversed(branches[branch]))
1165 if start is not None:
1169 if start is not None:
1166 # filter out the heads that cannot be reached from startrev
1170 # filter out the heads that cannot be reached from startrev
1167 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1171 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1168 bheads = [h for h in bheads if h in fbheads]
1172 bheads = [h for h in bheads if h in fbheads]
1169 if not closed:
1173 if not closed:
1170 bheads = [h for h in bheads if
1174 bheads = [h for h in bheads if
1171 ('close' not in self.changelog.read(h)[5])]
1175 ('close' not in self.changelog.read(h)[5])]
1172 return bheads
1176 return bheads
1173
1177
1174 def branches(self, nodes):
1178 def branches(self, nodes):
1175 if not nodes:
1179 if not nodes:
1176 nodes = [self.changelog.tip()]
1180 nodes = [self.changelog.tip()]
1177 b = []
1181 b = []
1178 for n in nodes:
1182 for n in nodes:
1179 t = n
1183 t = n
1180 while 1:
1184 while 1:
1181 p = self.changelog.parents(n)
1185 p = self.changelog.parents(n)
1182 if p[1] != nullid or p[0] == nullid:
1186 if p[1] != nullid or p[0] == nullid:
1183 b.append((t, n, p[0], p[1]))
1187 b.append((t, n, p[0], p[1]))
1184 break
1188 break
1185 n = p[0]
1189 n = p[0]
1186 return b
1190 return b
1187
1191
1188 def between(self, pairs):
1192 def between(self, pairs):
1189 r = []
1193 r = []
1190
1194
1191 for top, bottom in pairs:
1195 for top, bottom in pairs:
1192 n, l, i = top, [], 0
1196 n, l, i = top, [], 0
1193 f = 1
1197 f = 1
1194
1198
1195 while n != bottom and n != nullid:
1199 while n != bottom and n != nullid:
1196 p = self.changelog.parents(n)[0]
1200 p = self.changelog.parents(n)[0]
1197 if i == f:
1201 if i == f:
1198 l.append(n)
1202 l.append(n)
1199 f = f * 2
1203 f = f * 2
1200 n = p
1204 n = p
1201 i += 1
1205 i += 1
1202
1206
1203 r.append(l)
1207 r.append(l)
1204
1208
1205 return r
1209 return r
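    # Editorial note (not part of the original file): for each (top, bottom)
    # pair the list built above samples first-parent ancestors of top at
    # exponentially growing distances (1, 2, 4, ...); findcommonincoming()
    # feeds such samples to remote.between() for its binary search over
    # remote history.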
1206
1210
1207 def findincoming(self, remote, base=None, heads=None, force=False):
1211 def findincoming(self, remote, base=None, heads=None, force=False):
1208 """Return list of roots of the subsets of missing nodes from remote
1212 """Return list of roots of the subsets of missing nodes from remote
1209
1213
1210 If base dict is specified, assume that these nodes and their parents
1214 If base dict is specified, assume that these nodes and their parents
1211 exist on the remote side and that no child of a node of base exists
1215 exist on the remote side and that no child of a node of base exists
1212 in both remote and self.
1216 in both remote and self.
1213 Furthermore, base will be updated to include the nodes that exist
1217 Furthermore, base will be updated to include the nodes that exist
1214 in both self and remote but have no child that exists in both self and remote.
1218 in both self and remote but have no child that exists in both self and remote.
1215 If a list of heads is specified, return only nodes which are heads
1219 If a list of heads is specified, return only nodes which are heads
1216 or ancestors of these heads.
1220 or ancestors of these heads.
1217
1221
1218 All the ancestors of base are in self and in remote.
1222 All the ancestors of base are in self and in remote.
1219 All the descendants of the list returned are missing in self.
1223 All the descendants of the list returned are missing in self.
1220 (and so we know that the rest of the nodes are missing in remote, see
1224 (and so we know that the rest of the nodes are missing in remote, see
1221 outgoing)
1225 outgoing)
1222 """
1226 """
1223 return self.findcommonincoming(remote, base, heads, force)[1]
1227 return self.findcommonincoming(remote, base, heads, force)[1]
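    # Editorial sketch (not part of the original file): callers that only
    # need the missing roots use this wrapper, e.g.
    #
    #     fetch = repo.findincoming(other)
    #     if not fetch:
    #         pass  # nothing new on the remote side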
1224
1228
1225 def findcommonincoming(self, remote, base=None, heads=None, force=False):
1229 def findcommonincoming(self, remote, base=None, heads=None, force=False):
1226 """Return a tuple (common, missing roots, heads) used to identify
1230 """Return a tuple (common, missing roots, heads) used to identify
1227 missing nodes from remote.
1231 missing nodes from remote.
1228
1232
1229 If base dict is specified, assume that these nodes and their parents
1233 If base dict is specified, assume that these nodes and their parents
1230 exist on the remote side and that no child of a node of base exists
1234 exist on the remote side and that no child of a node of base exists
1231 in both remote and self.
1235 in both remote and self.
1232 Furthermore, base will be updated to include the nodes that exist
1236 Furthermore, base will be updated to include the nodes that exist
1233 in both self and remote but have no child that exists in both self and remote.
1237 in both self and remote but have no child that exists in both self and remote.
1234 If a list of heads is specified, return only nodes which are heads
1238 If a list of heads is specified, return only nodes which are heads
1235 or ancestors of these heads.
1239 or ancestors of these heads.
1236
1240
1237 All the ancestors of base are in self and in remote.
1241 All the ancestors of base are in self and in remote.
1238 """
1242 """
1239 m = self.changelog.nodemap
1243 m = self.changelog.nodemap
1240 search = []
1244 search = []
1241 fetch = set()
1245 fetch = set()
1242 seen = set()
1246 seen = set()
1243 seenbranch = set()
1247 seenbranch = set()
1244 if base is None:
1248 if base is None:
1245 base = {}
1249 base = {}
1246
1250
1247 if not heads:
1251 if not heads:
1248 heads = remote.heads()
1252 heads = remote.heads()
1249
1253
1250 if self.changelog.tip() == nullid:
1254 if self.changelog.tip() == nullid:
1251 base[nullid] = 1
1255 base[nullid] = 1
1252 if heads != [nullid]:
1256 if heads != [nullid]:
1253 return [nullid], [nullid], list(heads)
1257 return [nullid], [nullid], list(heads)
1254 return [nullid], [], []
1258 return [nullid], [], []
1255
1259
1256 # assume we're closer to the tip than the root
1260 # assume we're closer to the tip than the root
1257 # and start by examining the heads
1261 # and start by examining the heads
1258 self.ui.status(_("searching for changes\n"))
1262 self.ui.status(_("searching for changes\n"))
1259
1263
1260 unknown = []
1264 unknown = []
1261 for h in heads:
1265 for h in heads:
1262 if h not in m:
1266 if h not in m:
1263 unknown.append(h)
1267 unknown.append(h)
1264 else:
1268 else:
1265 base[h] = 1
1269 base[h] = 1
1266
1270
1267 heads = unknown
1271 heads = unknown
1268 if not unknown:
1272 if not unknown:
1269 return base.keys(), [], []
1273 return base.keys(), [], []
1270
1274
1271 req = set(unknown)
1275 req = set(unknown)
1272 reqcnt = 0
1276 reqcnt = 0
1273
1277
1274 # search through remote branches
1278 # search through remote branches
1275 # a 'branch' here is a linear segment of history, with four parts:
1279 # a 'branch' here is a linear segment of history, with four parts:
1276 # head, root, first parent, second parent
1280 # head, root, first parent, second parent
1277 # (a branch always has two parents (or none) by definition)
1281 # (a branch always has two parents (or none) by definition)
1278 unknown = remote.branches(unknown)
1282 unknown = remote.branches(unknown)
1279 while unknown:
1283 while unknown:
1280 r = []
1284 r = []
1281 while unknown:
1285 while unknown:
1282 n = unknown.pop(0)
1286 n = unknown.pop(0)
1283 if n[0] in seen:
1287 if n[0] in seen:
1284 continue
1288 continue
1285
1289
1286 self.ui.debug("examining %s:%s\n"
1290 self.ui.debug("examining %s:%s\n"
1287 % (short(n[0]), short(n[1])))
1291 % (short(n[0]), short(n[1])))
1288 if n[0] == nullid: # found the end of the branch
1292 if n[0] == nullid: # found the end of the branch
1289 pass
1293 pass
1290 elif n in seenbranch:
1294 elif n in seenbranch:
1291 self.ui.debug("branch already found\n")
1295 self.ui.debug("branch already found\n")
1292 continue
1296 continue
1293 elif n[1] and n[1] in m: # do we know the base?
1297 elif n[1] and n[1] in m: # do we know the base?
1294 self.ui.debug("found incomplete branch %s:%s\n"
1298 self.ui.debug("found incomplete branch %s:%s\n"
1295 % (short(n[0]), short(n[1])))
1299 % (short(n[0]), short(n[1])))
1296 search.append(n[0:2]) # schedule branch range for scanning
1300 search.append(n[0:2]) # schedule branch range for scanning
1297 seenbranch.add(n)
1301 seenbranch.add(n)
1298 else:
1302 else:
1299 if n[1] not in seen and n[1] not in fetch:
1303 if n[1] not in seen and n[1] not in fetch:
1300 if n[2] in m and n[3] in m:
1304 if n[2] in m and n[3] in m:
1301 self.ui.debug("found new changeset %s\n" %
1305 self.ui.debug("found new changeset %s\n" %
1302 short(n[1]))
1306 short(n[1]))
1303 fetch.add(n[1]) # earliest unknown
1307 fetch.add(n[1]) # earliest unknown
1304 for p in n[2:4]:
1308 for p in n[2:4]:
1305 if p in m:
1309 if p in m:
1306 base[p] = 1 # latest known
1310 base[p] = 1 # latest known
1307
1311
1308 for p in n[2:4]:
1312 for p in n[2:4]:
1309 if p not in req and p not in m:
1313 if p not in req and p not in m:
1310 r.append(p)
1314 r.append(p)
1311 req.add(p)
1315 req.add(p)
1312 seen.add(n[0])
1316 seen.add(n[0])
1313
1317
1314 if r:
1318 if r:
1315 reqcnt += 1
1319 reqcnt += 1
1316 self.ui.debug("request %d: %s\n" %
1320 self.ui.debug("request %d: %s\n" %
1317 (reqcnt, " ".join(map(short, r))))
1321 (reqcnt, " ".join(map(short, r))))
1318 for p in xrange(0, len(r), 10):
1322 for p in xrange(0, len(r), 10):
1319 for b in remote.branches(r[p:p+10]):
1323 for b in remote.branches(r[p:p+10]):
1320 self.ui.debug("received %s:%s\n" %
1324 self.ui.debug("received %s:%s\n" %
1321 (short(b[0]), short(b[1])))
1325 (short(b[0]), short(b[1])))
1322 unknown.append(b)
1326 unknown.append(b)
1323
1327
1324 # do binary search on the branches we found
1328 # do binary search on the branches we found
1325 while search:
1329 while search:
1326 newsearch = []
1330 newsearch = []
1327 reqcnt += 1
1331 reqcnt += 1
1328 for n, l in zip(search, remote.between(search)):
1332 for n, l in zip(search, remote.between(search)):
1329 l.append(n[1])
1333 l.append(n[1])
1330 p = n[0]
1334 p = n[0]
1331 f = 1
1335 f = 1
1332 for i in l:
1336 for i in l:
1333 self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
1337 self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
1334 if i in m:
1338 if i in m:
1335 if f <= 2:
1339 if f <= 2:
1336 self.ui.debug("found new branch changeset %s\n" %
1340 self.ui.debug("found new branch changeset %s\n" %
1337 short(p))
1341 short(p))
1338 fetch.add(p)
1342 fetch.add(p)
1339 base[i] = 1
1343 base[i] = 1
1340 else:
1344 else:
1341 self.ui.debug("narrowed branch search to %s:%s\n"
1345 self.ui.debug("narrowed branch search to %s:%s\n"
1342 % (short(p), short(i)))
1346 % (short(p), short(i)))
1343 newsearch.append((p, i))
1347 newsearch.append((p, i))
1344 break
1348 break
1345 p, f = i, f * 2
1349 p, f = i, f * 2
1346 search = newsearch
1350 search = newsearch
1347
1351
1348 # sanity check our fetch list
1352 # sanity check our fetch list
1349 for f in fetch:
1353 for f in fetch:
1350 if f in m:
1354 if f in m:
1351 raise error.RepoError(_("already have changeset ")
1355 raise error.RepoError(_("already have changeset ")
1352 + short(f[:4]))
1356 + short(f[:4]))
1353
1357
1354 if base.keys() == [nullid]:
1358 if base.keys() == [nullid]:
1355 if force:
1359 if force:
1356 self.ui.warn(_("warning: repository is unrelated\n"))
1360 self.ui.warn(_("warning: repository is unrelated\n"))
1357 else:
1361 else:
1358 raise util.Abort(_("repository is unrelated"))
1362 raise util.Abort(_("repository is unrelated"))
1359
1363
1360 self.ui.debug("found new changesets starting at " +
1364 self.ui.debug("found new changesets starting at " +
1361 " ".join([short(f) for f in fetch]) + "\n")
1365 " ".join([short(f) for f in fetch]) + "\n")
1362
1366
1363 self.ui.debug("%d total queries\n" % reqcnt)
1367 self.ui.debug("%d total queries\n" % reqcnt)
1364
1368
1365 return base.keys(), list(fetch), heads
1369 return base.keys(), list(fetch), heads
1366
1370
1367 def findoutgoing(self, remote, base=None, heads=None, force=False):
1371 def findoutgoing(self, remote, base=None, heads=None, force=False):
1368 """Return list of nodes that are roots of subsets not in remote
1372 """Return list of nodes that are roots of subsets not in remote
1369
1373
1370 If base dict is specified, assume that these nodes and their parents
1374 If base dict is specified, assume that these nodes and their parents
1371 exist on the remote side.
1375 exist on the remote side.
1372 If a list of heads is specified, return only nodes which are heads
1376 If a list of heads is specified, return only nodes which are heads
1373 or ancestors of these heads, and return a second element which
1377 or ancestors of these heads, and return a second element which
1374 contains all remote heads which get new children.
1378 contains all remote heads which get new children.
1375 """
1379 """
1376 if base is None:
1380 if base is None:
1377 base = {}
1381 base = {}
1378 self.findincoming(remote, base, heads, force=force)
1382 self.findincoming(remote, base, heads, force=force)
1379
1383
1380 self.ui.debug("common changesets up to "
1384 self.ui.debug("common changesets up to "
1381 + " ".join(map(short, base.keys())) + "\n")
1385 + " ".join(map(short, base.keys())) + "\n")
1382
1386
1383 remain = set(self.changelog.nodemap)
1387 remain = set(self.changelog.nodemap)
1384
1388
1385 # prune everything remote has from the tree
1389 # prune everything remote has from the tree
1386 remain.remove(nullid)
1390 remain.remove(nullid)
1387 remove = base.keys()
1391 remove = base.keys()
1388 while remove:
1392 while remove:
1389 n = remove.pop(0)
1393 n = remove.pop(0)
1390 if n in remain:
1394 if n in remain:
1391 remain.remove(n)
1395 remain.remove(n)
1392 for p in self.changelog.parents(n):
1396 for p in self.changelog.parents(n):
1393 remove.append(p)
1397 remove.append(p)
1394
1398
1395 # find every node whose parents have been pruned
1399 # find every node whose parents have been pruned
1396 subset = []
1400 subset = []
1397 # find every remote head that will get new children
1401 # find every remote head that will get new children
1398 updated_heads = set()
1402 updated_heads = set()
1399 for n in remain:
1403 for n in remain:
1400 p1, p2 = self.changelog.parents(n)
1404 p1, p2 = self.changelog.parents(n)
1401 if p1 not in remain and p2 not in remain:
1405 if p1 not in remain and p2 not in remain:
1402 subset.append(n)
1406 subset.append(n)
1403 if heads:
1407 if heads:
1404 if p1 in heads:
1408 if p1 in heads:
1405 updated_heads.add(p1)
1409 updated_heads.add(p1)
1406 if p2 in heads:
1410 if p2 in heads:
1407 updated_heads.add(p2)
1411 updated_heads.add(p2)
1408
1412
1409 # this is the set of all roots we have to push
1413 # this is the set of all roots we have to push
1410 if heads:
1414 if heads:
1411 return subset, list(updated_heads)
1415 return subset, list(updated_heads)
1412 else:
1416 else:
1413 return subset
1417 return subset
1414
1418
1415 def pull(self, remote, heads=None, force=False):
1419 def pull(self, remote, heads=None, force=False):
1416 lock = self.lock()
1420 lock = self.lock()
1417 try:
1421 try:
1418 common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
1422 common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
1419 force=force)
1423 force=force)
1420 if fetch == [nullid]:
1424 if fetch == [nullid]:
1421 self.ui.status(_("requesting all changes\n"))
1425 self.ui.status(_("requesting all changes\n"))
1422
1426
1423 if not fetch:
1427 if not fetch:
1424 self.ui.status(_("no changes found\n"))
1428 self.ui.status(_("no changes found\n"))
1425 return 0
1429 return 0
1426
1430
1427 if heads is None and remote.capable('changegroupsubset'):
1431 if heads is None and remote.capable('changegroupsubset'):
1428 heads = rheads
1432 heads = rheads
1429
1433
1430 if heads is None:
1434 if heads is None:
1431 cg = remote.changegroup(fetch, 'pull')
1435 cg = remote.changegroup(fetch, 'pull')
1432 else:
1436 else:
1433 if not remote.capable('changegroupsubset'):
1437 if not remote.capable('changegroupsubset'):
1434 raise util.Abort(_("Partial pull cannot be done because "
1438 raise util.Abort(_("Partial pull cannot be done because "
1435 "other repository doesn't support "
1439 "other repository doesn't support "
1436 "changegroupsubset."))
1440 "changegroupsubset."))
1437 cg = remote.changegroupsubset(fetch, heads, 'pull')
1441 cg = remote.changegroupsubset(fetch, heads, 'pull')
1438 return self.addchangegroup(cg, 'pull', remote.url())
1442 return self.addchangegroup(cg, 'pull', remote.url())
1439 finally:
1443 finally:
1440 lock.release()
1444 lock.release()
1441
1445
1442 def push(self, remote, force=False, revs=None):
1446 def push(self, remote, force=False, revs=None):
1443 # there are two ways to push to remote repo:
1447 # there are two ways to push to remote repo:
1444 #
1448 #
1445 # addchangegroup assumes local user can lock remote
1449 # addchangegroup assumes local user can lock remote
1446 # repo (local filesystem, old ssh servers).
1450 # repo (local filesystem, old ssh servers).
1447 #
1451 #
1448 # unbundle assumes local user cannot lock remote repo (new ssh
1452 # unbundle assumes local user cannot lock remote repo (new ssh
1449 # servers, http servers).
1453 # servers, http servers).
1450
1454
1451 if remote.capable('unbundle'):
1455 if remote.capable('unbundle'):
1452 return self.push_unbundle(remote, force, revs)
1456 return self.push_unbundle(remote, force, revs)
1453 return self.push_addchangegroup(remote, force, revs)
1457 return self.push_addchangegroup(remote, force, revs)
1454
1458
1455 def prepush(self, remote, force, revs):
1459 def prepush(self, remote, force, revs):
1456 '''Analyze the local and remote repositories and determine which
1460 '''Analyze the local and remote repositories and determine which
1457 changesets need to be pushed to the remote. Return a tuple
1461 changesets need to be pushed to the remote. Return a tuple
1458 (changegroup, remoteheads). changegroup is a readable file-like
1462 (changegroup, remoteheads). changegroup is a readable file-like
1459 object whose read() returns successive changegroup chunks ready to
1463 object whose read() returns successive changegroup chunks ready to
1460 be sent over the wire. remoteheads is the list of remote heads.
1464 be sent over the wire. remoteheads is the list of remote heads.
1461 '''
1465 '''
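        # Editorial note (not part of the original file): when there is
        # nothing to push, or the push would create new remote heads or a new
        # named branch without force, prepush returns (None, <status>) rather
        # than a changegroup; push_addchangegroup and push_unbundle below
        # test ret[0] for None before contacting the remote.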
1462 common = {}
1466 common = {}
1463 remote_heads = remote.heads()
1467 remote_heads = remote.heads()
1464 inc = self.findincoming(remote, common, remote_heads, force=force)
1468 inc = self.findincoming(remote, common, remote_heads, force=force)
1465
1469
1466 update, updated_heads = self.findoutgoing(remote, common, remote_heads)
1470 update, updated_heads = self.findoutgoing(remote, common, remote_heads)
1467 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1471 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1468
1472
1469 def checkbranch(lheads, rheads, updatelb):
1473 def checkbranch(lheads, rheads, updatelb):
1470 '''
1474 '''
1471 check whether there are more local heads than remote heads on
1475 check whether there are more local heads than remote heads on
1472 a specific branch.
1476 a specific branch.
1473
1477
1474 lheads: local branch heads
1478 lheads: local branch heads
1475 rheads: remote branch heads
1479 rheads: remote branch heads
1476 updatelb: outgoing local branch bases
1480 updatelb: outgoing local branch bases
1477 '''
1481 '''
1478
1482
1479 warn = 0
1483 warn = 0
1480
1484
1481 if not revs and len(lheads) > len(rheads):
1485 if not revs and len(lheads) > len(rheads):
1482 warn = 1
1486 warn = 1
1483 else:
1487 else:
1484 # add local heads involved in the push
1488 # add local heads involved in the push
1485 updatelheads = [self.changelog.heads(x, lheads)
1489 updatelheads = [self.changelog.heads(x, lheads)
1486 for x in updatelb]
1490 for x in updatelb]
1487 newheads = set(sum(updatelheads, [])) & set(lheads)
1491 newheads = set(sum(updatelheads, [])) & set(lheads)
1488
1492
1489 if not newheads:
1493 if not newheads:
1490 return True
1494 return True
1491
1495
1492 # add heads we don't have or that are not involved in the push
1496 # add heads we don't have or that are not involved in the push
1493 for r in rheads:
1497 for r in rheads:
1494 if r in self.changelog.nodemap:
1498 if r in self.changelog.nodemap:
1495 desc = self.changelog.heads(r, heads)
1499 desc = self.changelog.heads(r, heads)
1496 l = [h for h in heads if h in desc]
1500 l = [h for h in heads if h in desc]
1497 if not l:
1501 if not l:
1498 newheads.add(r)
1502 newheads.add(r)
1499 else:
1503 else:
1500 newheads.add(r)
1504 newheads.add(r)
1501 if len(newheads) > len(rheads):
1505 if len(newheads) > len(rheads):
1502 warn = 1
1506 warn = 1
1503
1507
1504 if warn:
1508 if warn:
1505 if not rheads: # new branch requires --force
1509 if not rheads: # new branch requires --force
1506 self.ui.warn(_("abort: push creates new"
1510 self.ui.warn(_("abort: push creates new"
1507 " remote branch '%s'!\n") %
1511 " remote branch '%s'!\n") %
1508 self[lheads[0]].branch())
1512 self[lheads[0]].branch())
1509 else:
1513 else:
1510 self.ui.warn(_("abort: push creates new remote heads!\n"))
1514 self.ui.warn(_("abort: push creates new remote heads!\n"))
1511
1515
1512 self.ui.status(_("(did you forget to merge?"
1516 self.ui.status(_("(did you forget to merge?"
1513 " use push -f to force)\n"))
1517 " use push -f to force)\n"))
1514 return False
1518 return False
1515 return True
1519 return True
1516
1520
1517 if not bases:
1521 if not bases:
1518 self.ui.status(_("no changes found\n"))
1522 self.ui.status(_("no changes found\n"))
1519 return None, 1
1523 return None, 1
1520 elif not force:
1524 elif not force:
1521 # Check for each named branch if we're creating new remote heads.
1525 # Check for each named branch if we're creating new remote heads.
1522 # To be a remote head after push, node must be either:
1526 # To be a remote head after push, node must be either:
1523 # - unknown locally
1527 # - unknown locally
1524 # - a local outgoing head descended from update
1528 # - a local outgoing head descended from update
1525 # - a remote head that's known locally and not
1529 # - a remote head that's known locally and not
1526 # ancestral to an outgoing head
1530 # ancestral to an outgoing head
1527 #
1531 #
1528 # New named branches cannot be created without --force.
1532 # New named branches cannot be created without --force.
1529
1533
1530 if remote_heads != [nullid]:
1534 if remote_heads != [nullid]:
1531 if remote.capable('branchmap'):
1535 if remote.capable('branchmap'):
1532 localhds = {}
1536 localhds = {}
1533 if not revs:
1537 if not revs:
1534 localhds = self.branchmap()
1538 localhds = self.branchmap()
1535 else:
1539 else:
1536 for n in heads:
1540 for n in heads:
1537 branch = self[n].branch()
1541 branch = self[n].branch()
1538 if branch in localhds:
1542 if branch in localhds:
1539 localhds[branch].append(n)
1543 localhds[branch].append(n)
1540 else:
1544 else:
1541 localhds[branch] = [n]
1545 localhds[branch] = [n]
1542
1546
1543 remotehds = remote.branchmap()
1547 remotehds = remote.branchmap()
1544
1548
1545 for lh in localhds:
1549 for lh in localhds:
1546 if lh in remotehds:
1550 if lh in remotehds:
1547 rheads = remotehds[lh]
1551 rheads = remotehds[lh]
1548 else:
1552 else:
1549 rheads = []
1553 rheads = []
1550 lheads = localhds[lh]
1554 lheads = localhds[lh]
1551 if not checkbranch(lheads, rheads, update):
1555 if not checkbranch(lheads, rheads, update):
1552 return None, 0
1556 return None, 0
1553 else:
1557 else:
1554 if not checkbranch(heads, remote_heads, update):
1558 if not checkbranch(heads, remote_heads, update):
1555 return None, 0
1559 return None, 0
1556
1560
1557 if inc:
1561 if inc:
1558 self.ui.warn(_("note: unsynced remote changes!\n"))
1562 self.ui.warn(_("note: unsynced remote changes!\n"))
1559
1563
1560
1564
1561 if revs is None:
1565 if revs is None:
1562 # use the fast path, no race possible on push
1566 # use the fast path, no race possible on push
1563 nodes = self.changelog.findmissing(common.keys())
1567 nodes = self.changelog.findmissing(common.keys())
1564 cg = self._changegroup(nodes, 'push')
1568 cg = self._changegroup(nodes, 'push')
1565 else:
1569 else:
1566 cg = self.changegroupsubset(update, revs, 'push')
1570 cg = self.changegroupsubset(update, revs, 'push')
1567 return cg, remote_heads
1571 return cg, remote_heads
1568
1572
1569 def push_addchangegroup(self, remote, force, revs):
1573 def push_addchangegroup(self, remote, force, revs):
1570 lock = remote.lock()
1574 lock = remote.lock()
1571 try:
1575 try:
1572 ret = self.prepush(remote, force, revs)
1576 ret = self.prepush(remote, force, revs)
1573 if ret[0] is not None:
1577 if ret[0] is not None:
1574 cg, remote_heads = ret
1578 cg, remote_heads = ret
1575 return remote.addchangegroup(cg, 'push', self.url())
1579 return remote.addchangegroup(cg, 'push', self.url())
1576 return ret[1]
1580 return ret[1]
1577 finally:
1581 finally:
1578 lock.release()
1582 lock.release()
1579
1583
1580 def push_unbundle(self, remote, force, revs):
1584 def push_unbundle(self, remote, force, revs):
1581 # local repo finds heads on server, finds out what revs it
1585 # local repo finds heads on server, finds out what revs it
1582 # must push. once revs transferred, if server finds it has
1586 # must push. once revs transferred, if server finds it has
1583 # different heads (someone else won commit/push race), server
1587 # different heads (someone else won commit/push race), server
1584 # aborts.
1588 # aborts.
1585
1589
1586 ret = self.prepush(remote, force, revs)
1590 ret = self.prepush(remote, force, revs)
1587 if ret[0] is not None:
1591 if ret[0] is not None:
1588 cg, remote_heads = ret
1592 cg, remote_heads = ret
1589 if force: remote_heads = ['force']
1593 if force: remote_heads = ['force']
1590 return remote.unbundle(cg, remote_heads, 'push')
1594 return remote.unbundle(cg, remote_heads, 'push')
1591 return ret[1]
1595 return ret[1]
1592
1596
1593 def changegroupinfo(self, nodes, source):
1597 def changegroupinfo(self, nodes, source):
1594 if self.ui.verbose or source == 'bundle':
1598 if self.ui.verbose or source == 'bundle':
1595 self.ui.status(_("%d changesets found\n") % len(nodes))
1599 self.ui.status(_("%d changesets found\n") % len(nodes))
1596 if self.ui.debugflag:
1600 if self.ui.debugflag:
1597 self.ui.debug("list of changesets:\n")
1601 self.ui.debug("list of changesets:\n")
1598 for node in nodes:
1602 for node in nodes:
1599 self.ui.debug("%s\n" % hex(node))
1603 self.ui.debug("%s\n" % hex(node))
1600
1604
1601 def changegroupsubset(self, bases, heads, source, extranodes=None):
1605 def changegroupsubset(self, bases, heads, source, extranodes=None):
1602 """Compute a changegroup consisting of all the nodes that are
1606 """Compute a changegroup consisting of all the nodes that are
1603 descendants of any of the bases and ancestors of any of the heads.
1607 descendants of any of the bases and ancestors of any of the heads.
1604 Return a chunkbuffer object whose read() method will return
1608 Return a chunkbuffer object whose read() method will return
1605 successive changegroup chunks.
1609 successive changegroup chunks.
1606
1610
1607 It is fairly complex as determining which filenodes and which
1611 It is fairly complex as determining which filenodes and which
1608 manifest nodes need to be included for the changeset to be complete
1612 manifest nodes need to be included for the changeset to be complete
1609 is non-trivial.
1613 is non-trivial.
1610
1614
1611 Another wrinkle is doing the reverse, figuring out which changeset in
1615 Another wrinkle is doing the reverse, figuring out which changeset in
1612 the changegroup a particular filenode or manifestnode belongs to.
1616 the changegroup a particular filenode or manifestnode belongs to.
1613
1617
1614 The caller can specify some nodes that must be included in the
1618 The caller can specify some nodes that must be included in the
1615 changegroup using the extranodes argument. It should be a dict
1619 changegroup using the extranodes argument. It should be a dict
1616 where the keys are the filenames (or 1 for the manifest), and the
1620 where the keys are the filenames (or 1 for the manifest), and the
1617 values are lists of (node, linknode) tuples, where node is a wanted
1621 values are lists of (node, linknode) tuples, where node is a wanted
1618 node and linknode is the changelog node that should be transmitted as
1622 node and linknode is the changelog node that should be transmitted as
1619 the linkrev.
1623 the linkrev.
1620 """
1624 """
1621
1625
1622 # Set up some initial variables
1626 # Set up some initial variables
1623 # Make it easy to refer to self.changelog
1627 # Make it easy to refer to self.changelog
1624 cl = self.changelog
1628 cl = self.changelog
1625 # msng is short for missing - compute the list of changesets in this
1629 # msng is short for missing - compute the list of changesets in this
1626 # changegroup.
1630 # changegroup.
1627 if not bases:
1631 if not bases:
1628 bases = [nullid]
1632 bases = [nullid]
1629 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1633 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1630
1634
1631 if extranodes is None:
1635 if extranodes is None:
1632 # can we go through the fast path ?
1636 # can we go through the fast path ?
1633 heads.sort()
1637 heads.sort()
1634 allheads = self.heads()
1638 allheads = self.heads()
1635 allheads.sort()
1639 allheads.sort()
1636 if heads == allheads:
1640 if heads == allheads:
1637 return self._changegroup(msng_cl_lst, source)
1641 return self._changegroup(msng_cl_lst, source)
1638
1642
1639 # slow path
1643 # slow path
1640 self.hook('preoutgoing', throw=True, source=source)
1644 self.hook('preoutgoing', throw=True, source=source)
1641
1645
1642 self.changegroupinfo(msng_cl_lst, source)
1646 self.changegroupinfo(msng_cl_lst, source)
1643 # Some bases may turn out to be superfluous, and some heads may be
1647 # Some bases may turn out to be superfluous, and some heads may be
1644 # too. nodesbetween will return the minimal set of bases and heads
1648 # too. nodesbetween will return the minimal set of bases and heads
1645 # necessary to re-create the changegroup.
1649 # necessary to re-create the changegroup.
1646
1650
1647 # Known heads are the list of heads that it is assumed the recipient
1651 # Known heads are the list of heads that it is assumed the recipient
1648 # of this changegroup will know about.
1652 # of this changegroup will know about.
1649 knownheads = set()
1653 knownheads = set()
1650 # We assume that all parents of bases are known heads.
1654 # We assume that all parents of bases are known heads.
1651 for n in bases:
1655 for n in bases:
1652 knownheads.update(cl.parents(n))
1656 knownheads.update(cl.parents(n))
1653 knownheads.discard(nullid)
1657 knownheads.discard(nullid)
1654 knownheads = list(knownheads)
1658 knownheads = list(knownheads)
1655 if knownheads:
1659 if knownheads:
1656 # Now that we know what heads are known, we can compute which
1660 # Now that we know what heads are known, we can compute which
1657 # changesets are known. The recipient must know about all
1661 # changesets are known. The recipient must know about all
1658 # changesets required to reach the known heads from the null
1662 # changesets required to reach the known heads from the null
1659 # changeset.
1663 # changeset.
1660 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1664 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1661 junk = None
1665 junk = None
1662 # Transform the list into a set.
1666 # Transform the list into a set.
1663 has_cl_set = set(has_cl_set)
1667 has_cl_set = set(has_cl_set)
1664 else:
1668 else:
1665 # If there were no known heads, the recipient cannot be assumed to
1669 # If there were no known heads, the recipient cannot be assumed to
1666 # know about any changesets.
1670 # know about any changesets.
1667 has_cl_set = set()
1671 has_cl_set = set()
1668
1672
1669 # Make it easy to refer to self.manifest
1673 # Make it easy to refer to self.manifest
1670 mnfst = self.manifest
1674 mnfst = self.manifest
1671 # We don't know which manifests are missing yet
1675 # We don't know which manifests are missing yet
1672 msng_mnfst_set = {}
1676 msng_mnfst_set = {}
1673 # Nor do we know which filenodes are missing.
1677 # Nor do we know which filenodes are missing.
1674 msng_filenode_set = {}
1678 msng_filenode_set = {}
1675
1679
1676 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1680 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1677 junk = None
1681 junk = None
1678
1682
1679 # A changeset always belongs to itself, so the changenode lookup
1683 # A changeset always belongs to itself, so the changenode lookup
1680 # function for a changenode is identity.
1684 # function for a changenode is identity.
1681 def identity(x):
1685 def identity(x):
1682 return x
1686 return x
1683
1687
1684 # If we determine that a particular file or manifest node must be a
1688 # If we determine that a particular file or manifest node must be a
1685 # node that the recipient of the changegroup will already have, we can
1689 # node that the recipient of the changegroup will already have, we can
1686 # also assume the recipient will have all the parents. This function
1690 # also assume the recipient will have all the parents. This function
1687 # prunes them from the set of missing nodes.
1691 # prunes them from the set of missing nodes.
1688 def prune_parents(revlog, hasset, msngset):
1692 def prune_parents(revlog, hasset, msngset):
1689 haslst = list(hasset)
1693 haslst = list(hasset)
1690 haslst.sort(key=revlog.rev)
1694 haslst.sort(key=revlog.rev)
1691 for node in haslst:
1695 for node in haslst:
1692 parentlst = [p for p in revlog.parents(node) if p != nullid]
1696 parentlst = [p for p in revlog.parents(node) if p != nullid]
1693 while parentlst:
1697 while parentlst:
1694 n = parentlst.pop()
1698 n = parentlst.pop()
1695 if n not in hasset:
1699 if n not in hasset:
1696 hasset.add(n)
1700 hasset.add(n)
1697 p = [p for p in revlog.parents(n) if p != nullid]
1701 p = [p for p in revlog.parents(n) if p != nullid]
1698 parentlst.extend(p)
1702 parentlst.extend(p)
1699 for n in hasset:
1703 for n in hasset:
1700 msngset.pop(n, None)
1704 msngset.pop(n, None)
1701
1705
1702 # This is a function generating function used to set up an environment
1706 # This is a function generating function used to set up an environment
1703 # for the inner function to execute in.
1707 # for the inner function to execute in.
1704 def manifest_and_file_collector(changedfileset):
1708 def manifest_and_file_collector(changedfileset):
1705 # This is an information gathering function that gathers
1709 # This is an information gathering function that gathers
1706 # information from each changeset node that goes out as part of
1710 # information from each changeset node that goes out as part of
1707 # the changegroup. The information gathered is a list of which
1711 # the changegroup. The information gathered is a list of which
1708 # manifest nodes are potentially required (the recipient may
1712 # manifest nodes are potentially required (the recipient may
1709 # already have them) and total list of all files which were
1713 # already have them) and total list of all files which were
1710 # changed in any changeset in the changegroup.
1714 # changed in any changeset in the changegroup.
1711 #
1715 #
1712 # We also remember the first changenode we saw any manifest
1716 # We also remember the first changenode we saw any manifest
1713 # referenced by so we can later determine which changenode 'owns'
1717 # referenced by so we can later determine which changenode 'owns'
1714 # the manifest.
1718 # the manifest.
1715 def collect_manifests_and_files(clnode):
1719 def collect_manifests_and_files(clnode):
1716 c = cl.read(clnode)
1720 c = cl.read(clnode)
1717 for f in c[3]:
1721 for f in c[3]:
1718 # This is to make sure we only have one instance of each
1722 # This is to make sure we only have one instance of each
1719 # filename string for each filename.
1723 # filename string for each filename.
1720 changedfileset.setdefault(f, f)
1724 changedfileset.setdefault(f, f)
1721 msng_mnfst_set.setdefault(c[0], clnode)
1725 msng_mnfst_set.setdefault(c[0], clnode)
1722 return collect_manifests_and_files
1726 return collect_manifests_and_files
1723
1727
1724 # Figure out which manifest nodes (of the ones we think might be part
1728 # Figure out which manifest nodes (of the ones we think might be part
1725 # of the changegroup) the recipient must know about and remove them
1729 # of the changegroup) the recipient must know about and remove them
1726 # from the changegroup.
1730 # from the changegroup.
1727 def prune_manifests():
1731 def prune_manifests():
1728 has_mnfst_set = set()
1732 has_mnfst_set = set()
1729 for n in msng_mnfst_set:
1733 for n in msng_mnfst_set:
1730 # If a 'missing' manifest thinks it belongs to a changenode
1734 # If a 'missing' manifest thinks it belongs to a changenode
1731 # the recipient is assumed to have, obviously the recipient
1735 # the recipient is assumed to have, obviously the recipient
1732 # must have that manifest.
1736 # must have that manifest.
1733 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1737 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1734 if linknode in has_cl_set:
1738 if linknode in has_cl_set:
1735 has_mnfst_set.add(n)
1739 has_mnfst_set.add(n)
1736 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1740 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1737
1741
1738 # Use the information collected in collect_manifests_and_files to say
1742 # Use the information collected in collect_manifests_and_files to say
1739 # which changenode any manifestnode belongs to.
1743 # which changenode any manifestnode belongs to.
1740 def lookup_manifest_link(mnfstnode):
1744 def lookup_manifest_link(mnfstnode):
1741 return msng_mnfst_set[mnfstnode]
1745 return msng_mnfst_set[mnfstnode]
1742
1746
1743 # A function generating function that sets up the initial environment
1747 # A function generating function that sets up the initial environment
1744 # the inner function.
1748 # the inner function.
1745 def filenode_collector(changedfiles):
1749 def filenode_collector(changedfiles):
1746 next_rev = [0]
1750 next_rev = [0]
1747 # This gathers information from each manifestnode included in the
1751 # This gathers information from each manifestnode included in the
1748 # changegroup about which filenodes the manifest node references
1752 # changegroup about which filenodes the manifest node references
1749 # so we can include those in the changegroup too.
1753 # so we can include those in the changegroup too.
1750 #
1754 #
1751 # It also remembers which changenode each filenode belongs to. It
1755 # It also remembers which changenode each filenode belongs to. It
1752 # does this by assuming the a filenode belongs to the changenode
1756 # does this by assuming the a filenode belongs to the changenode
1753 # the first manifest that references it belongs to.
1757 # the first manifest that references it belongs to.
1754 def collect_msng_filenodes(mnfstnode):
1758 def collect_msng_filenodes(mnfstnode):
1755 r = mnfst.rev(mnfstnode)
1759 r = mnfst.rev(mnfstnode)
1756 if r == next_rev[0]:
1760 if r == next_rev[0]:
1757 # If the last rev we looked at was the one just previous,
1761 # If the last rev we looked at was the one just previous,
1758 # we only need to see a diff.
1762 # we only need to see a diff.
1759 deltamf = mnfst.readdelta(mnfstnode)
1763 deltamf = mnfst.readdelta(mnfstnode)
1760 # For each line in the delta
1764 # For each line in the delta
1761 for f, fnode in deltamf.iteritems():
1765 for f, fnode in deltamf.iteritems():
1762 f = changedfiles.get(f, None)
1766 f = changedfiles.get(f, None)
1763 # And if the file is in the list of files we care
1767 # And if the file is in the list of files we care
1764 # about.
1768 # about.
1765 if f is not None:
1769 if f is not None:
1766 # Get the changenode this manifest belongs to
1770 # Get the changenode this manifest belongs to
1767 clnode = msng_mnfst_set[mnfstnode]
1771 clnode = msng_mnfst_set[mnfstnode]
1768 # Create the set of filenodes for the file if
1772 # Create the set of filenodes for the file if
1769 # there isn't one already.
1773 # there isn't one already.
1770 ndset = msng_filenode_set.setdefault(f, {})
1774 ndset = msng_filenode_set.setdefault(f, {})
1771 # And set the filenode's changelog node to the
1775 # And set the filenode's changelog node to the
1772 # manifest's if it hasn't been set already.
1776 # manifest's if it hasn't been set already.
1773 ndset.setdefault(fnode, clnode)
1777 ndset.setdefault(fnode, clnode)
1774 else:
1778 else:
1775 # Otherwise we need a full manifest.
1779 # Otherwise we need a full manifest.
1776 m = mnfst.read(mnfstnode)
1780 m = mnfst.read(mnfstnode)
1777 # For every file we care about.
1781 # For every file we care about.
1778 for f in changedfiles:
1782 for f in changedfiles:
1779 fnode = m.get(f, None)
1783 fnode = m.get(f, None)
1780 # If it's in the manifest
1784 # If it's in the manifest
1781 if fnode is not None:
1785 if fnode is not None:
1782 # See comments above.
1786 # See comments above.
1783 clnode = msng_mnfst_set[mnfstnode]
1787 clnode = msng_mnfst_set[mnfstnode]
1784 ndset = msng_filenode_set.setdefault(f, {})
1788 ndset = msng_filenode_set.setdefault(f, {})
1785 ndset.setdefault(fnode, clnode)
1789 ndset.setdefault(fnode, clnode)
1786 # Remember the revision we hope to see next.
1790 # Remember the revision we hope to see next.
1787 next_rev[0] = r + 1
1791 next_rev[0] = r + 1
1788 return collect_msng_filenodes
1792 return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, let's remove
        # all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = set()
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
                if clnode in has_cl_set:
                    hasset.add(n)
            prune_parents(filerevlog, hasset, msngset)

        # A function-generating function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Look up the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(key=mnfst.rev)
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        continue
                    msng_filenode_set.setdefault(fname, {})
                    changedfiles[fname] = 1
            # Go through all our files in order sorted by name.
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if fname in msng_filenode_set:
                    prune_filenodes(fname, filerevlog)
                    add_extra_nodes(fname, msng_filenode_set[fname])
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision number.
                    msng_filenode_lst.sort(key=filerevlog.rev)
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if fname in msng_filenode_set:
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

            if msng_cl_lst:
                self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
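
    # A rough sketch of the stream gengroup() yields, inferred from the code
    # above rather than from the formal changegroup specification: first the
    # changelog group, then the manifest group, then one group per changed
    # file, each preceded by a chunk carrying the file name, and finally an
    # empty chunk (changegroup.closechunk()) marking the end of the file
    # groups.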

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        revset = set([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            return x

        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                changedfileset.update(c[3])
            return collect_changed_files

        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(revlog.rev(n)))
            return lookuprevlink

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files
            changedfiles = set()

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

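        # The returned chunkbuffer is meant to be drained by the caller. A
        # minimal sketch of that (hypothetical caller code, not something this
        # module does itself):
        #   cg = repo._changegroup(nodes, 'push')
        #   while True:
        #       chunk = cg.read(4096)
        #       if not chunk:
        #           break
        #       # send chunk to the peer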
        return util.chunkbuffer(gengroup())

    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug("adding %s revisions\n" % f)
                fl = self.file(f)
                o = len(fl)
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1

            newheads = len(cl.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            del tr

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug("updating the branch cache\n")
            self.branchtags()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1


    def stream_in(self, remote):
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
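        # What follows on the wire, as this method parses it (an informal
        # sketch, not a protocol specification): one line with
        # "<total_files> <total_bytes>", then for each file a line of the
        # form "<name>\0<size>" followed by exactly <size> bytes of raw
        # store data.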
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
            # for backwards compat, name was partially encoded
            ofp = self.sopener(store.decodedir(name), 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.invalidate()
        return len(self.heads()) + 1

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''
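        # A hedged usage sketch (hypothetical caller code, names assumed):
        #   other = hg.repository(ui, 'http://example.com/repo')
        #   local.clone(other, stream=True)
        # As the logic below shows, the streaming path is only taken when no
        # heads were requested and the remote advertises the 'stream'
        # capability; otherwise this falls back to a normal pull.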

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads and remote.capable('stream'):
            return self.stream_in(remote)
        return self.pull(remote, heads)

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a
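
# Sketch of the intended use (an assumption based on the comment above, not a
# statement about every caller): the callable built by aftertrans() is handed
# to the transaction machinery so the renames run after the transaction
# closes, without the transaction holding a reference back to the repository.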

def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True