commit: save commit message so it's not destroyed by rollback....
Greg Ward
r9934:720f70b7 default
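Background for the hunk below: commit() now writes the pending commit message to .hg/last-message.txt right before commitctx() runs, so a rollback (for example, one triggered by a failing pretxncommit hook) no longer destroys the message. A minimal sketch, not part of the changeset, of how a script or extension might recover the saved message afterwards; read_last_message and reporoot are hypothetical names:

    import os

    def read_last_message(reporoot):
        # commit() writes through repo.opener, which is rooted at .hg,
        # so the message lands in <reporoot>/.hg/last-message.txt.
        path = os.path.join(reporoot, '.hg', 'last-message.txt')
        try:
            return open(path).read()
        except IOError:
            # no commit has been attempted in this repository yet
            return None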
--- a/mercurial/localrepo.py
+++ b/mercurial/localrepo.py
@@ -1,2164 +1,2173 @@
 # localrepo.py - read/write repository class for mercurial
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2, incorporated herein by reference.

 from node import bin, hex, nullid, nullrev, short
 from i18n import _
 import repo, changegroup, subrepo
 import changelog, dirstate, filelog, manifest, context
 import lock, transaction, store, encoding
 import util, extensions, hook, error
 import match as match_
 import merge as merge_
 import tags as tags_
 from lock import release
 import weakref, stat, errno, os, time, inspect
 propertycache = util.propertycache

 class localrepository(repo.repository):
     capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
     supported = set('revlogv1 store fncache shared'.split())

     def __init__(self, baseui, path=None, create=0):
         repo.repository.__init__(self)
         self.root = os.path.realpath(path)
         self.path = os.path.join(self.root, ".hg")
         self.origroot = path
         self.opener = util.opener(self.path)
         self.wopener = util.opener(self.root)
         self.baseui = baseui
         self.ui = baseui.copy()

         try:
             self.ui.readconfig(self.join("hgrc"), self.root)
             extensions.loadall(self.ui)
         except IOError:
             pass

         if not os.path.isdir(self.path):
             if create:
                 if not os.path.exists(path):
                     os.mkdir(path)
                 os.mkdir(self.path)
                 requirements = ["revlogv1"]
                 if self.ui.configbool('format', 'usestore', True):
                     os.mkdir(os.path.join(self.path, "store"))
                     requirements.append("store")
                     if self.ui.configbool('format', 'usefncache', True):
                         requirements.append("fncache")
                     # create an invalid changelog
                     self.opener("00changelog.i", "a").write(
                         '\0\0\0\2' # represents revlogv2
                         ' dummy changelog to prevent using the old repo layout'
                     )
                 reqfile = self.opener("requires", "w")
                 for r in requirements:
                     reqfile.write("%s\n" % r)
                 reqfile.close()
             else:
                 raise error.RepoError(_("repository %s not found") % path)
         elif create:
             raise error.RepoError(_("repository %s already exists") % path)
         else:
             # find requirements
             requirements = set()
             try:
                 requirements = set(self.opener("requires").read().splitlines())
             except IOError, inst:
                 if inst.errno != errno.ENOENT:
                     raise
             for r in requirements - self.supported:
                 raise error.RepoError(_("requirement '%s' not supported") % r)

         self.sharedpath = self.path
         try:
             s = os.path.realpath(self.opener("sharedpath").read())
             if not os.path.exists(s):
                 raise error.RepoError(
                     _('.hg/sharedpath points to nonexistent directory %s') % s)
             self.sharedpath = s
         except IOError, inst:
             if inst.errno != errno.ENOENT:
                 raise

         self.store = store.store(requirements, self.sharedpath, util.opener)
         self.spath = self.store.path
         self.sopener = self.store.opener
         self.sjoin = self.store.join
         self.opener.createmode = self.store.createmode

         # These two define the set of tags for this repository. _tags
         # maps tag name to node; _tagtypes maps tag name to 'global' or
         # 'local'. (Global tags are defined by .hgtags across all
         # heads, and local tags are defined in .hg/localtags.) They
         # constitute the in-memory cache of tags.
         self._tags = None
         self._tagtypes = None

         self._branchcache = None # in UTF-8
         self._branchcachetip = None
         self.nodetagscache = None
         self.filterpats = {}
         self._datafilters = {}
         self._transref = self._lockref = self._wlockref = None

     @propertycache
     def changelog(self):
         c = changelog.changelog(self.sopener)
         if 'HG_PENDING' in os.environ:
             p = os.environ['HG_PENDING']
             if p.startswith(self.root):
                 c.readpending('00changelog.i.a')
         self.sopener.defversion = c.version
         return c

     @propertycache
     def manifest(self):
         return manifest.manifest(self.sopener)

     @propertycache
     def dirstate(self):
         return dirstate.dirstate(self.opener, self.ui, self.root)

     def __getitem__(self, changeid):
         if changeid is None:
             return context.workingctx(self)
         return context.changectx(self, changeid)

     def __contains__(self, changeid):
         try:
             return bool(self.lookup(changeid))
         except error.RepoLookupError:
             return False

     def __nonzero__(self):
         return True

     def __len__(self):
         return len(self.changelog)

     def __iter__(self):
         for i in xrange(len(self)):
             yield i

     def url(self):
         return 'file:' + self.root

     def hook(self, name, throw=False, **args):
         return hook.hook(self.ui, self, name, throw, **args)

     tag_disallowed = ':\r\n'

     def _tag(self, names, node, message, local, user, date, extra={}):
         if isinstance(names, str):
             allchars = names
             names = (names,)
         else:
             allchars = ''.join(names)
         for c in self.tag_disallowed:
             if c in allchars:
                 raise util.Abort(_('%r cannot be used in a tag name') % c)

         for name in names:
             self.hook('pretag', throw=True, node=hex(node), tag=name,
                       local=local)

         def writetags(fp, names, munge, prevtags):
             fp.seek(0, 2)
             if prevtags and prevtags[-1] != '\n':
                 fp.write('\n')
             for name in names:
                 m = munge and munge(name) or name
                 if self._tagtypes and name in self._tagtypes:
                     old = self._tags.get(name, nullid)
                     fp.write('%s %s\n' % (hex(old), m))
                 fp.write('%s %s\n' % (hex(node), m))
             fp.close()

         prevtags = ''
         if local:
             try:
                 fp = self.opener('localtags', 'r+')
             except IOError:
                 fp = self.opener('localtags', 'a')
             else:
                 prevtags = fp.read()

             # local tags are stored in the current charset
             writetags(fp, names, None, prevtags)
             for name in names:
                 self.hook('tag', node=hex(node), tag=name, local=local)
             return

         try:
             fp = self.wfile('.hgtags', 'rb+')
         except IOError:
             fp = self.wfile('.hgtags', 'ab')
         else:
             prevtags = fp.read()

         # committed tags are stored in UTF-8
         writetags(fp, names, encoding.fromlocal, prevtags)

         if '.hgtags' not in self.dirstate:
             self.add(['.hgtags'])

         m = match_.exact(self.root, '', ['.hgtags'])
         tagnode = self.commit(message, user, date, extra=extra, match=m)

         for name in names:
             self.hook('tag', node=hex(node), tag=name, local=local)

         return tagnode

     def tag(self, names, node, message, local, user, date):
         '''tag a revision with one or more symbolic names.

         names is a list of strings or, when adding a single tag, names may be a
         string.

         if local is True, the tags are stored in a per-repository file.
         otherwise, they are stored in the .hgtags file, and a new
         changeset is committed with the change.

         keyword arguments:

         local: whether to store tags in non-version-controlled file
         (default False)

         message: commit message to use if committing

         user: name of user to use if committing

         date: date tuple to use if committing'''

         for x in self.status()[:5]:
             if '.hgtags' in x:
                 raise util.Abort(_('working copy of .hgtags is changed '
                                    '(please commit .hgtags manually)'))

         self.tags() # instantiate the cache
         self._tag(names, node, message, local, user, date)

     def tags(self):
         '''return a mapping of tag to node'''
         if self._tags is None:
             (self._tags, self._tagtypes) = self._findtags()

         return self._tags

     def _findtags(self):
         '''Do the hard work of finding tags. Return a pair of dicts
         (tags, tagtypes) where tags maps tag name to node, and tagtypes
         maps tag name to a string like \'global\' or \'local\'.
         Subclasses or extensions are free to add their own tags, but
         should be aware that the returned dicts will be retained for the
         duration of the localrepo object.'''

         # XXX what tagtype should subclasses/extensions use? Currently
         # mq and bookmarks add tags, but do not set the tagtype at all.
         # Should each extension invent its own tag type? Should there
         # be one tagtype for all such "virtual" tags? Or is the status
         # quo fine?

         alltags = {} # map tag name to (node, hist)
         tagtypes = {}

         tags_.findglobaltags(self.ui, self, alltags, tagtypes)
         tags_.readlocaltags(self.ui, self, alltags, tagtypes)

         # Build the return dicts. Have to re-encode tag names because
         # the tags module always uses UTF-8 (in order not to lose info
         # writing to the cache), but the rest of Mercurial wants them in
         # local encoding.
         tags = {}
         for (name, (node, hist)) in alltags.iteritems():
             if node != nullid:
                 tags[encoding.tolocal(name)] = node
         tags['tip'] = self.changelog.tip()
         tagtypes = dict([(encoding.tolocal(name), value)
                          for (name, value) in tagtypes.iteritems()])
         return (tags, tagtypes)

     def tagtype(self, tagname):
         '''
         return the type of the given tag. result can be:

         'local'  : a local tag
         'global' : a global tag
         None     : tag does not exist
         '''

         self.tags()

         return self._tagtypes.get(tagname)

     def tagslist(self):
         '''return a list of tags ordered by revision'''
         l = []
         for t, n in self.tags().iteritems():
             try:
                 r = self.changelog.rev(n)
             except:
                 r = -2 # sort to the beginning of the list if unknown
             l.append((r, t, n))
         return [(t, n) for r, t, n in sorted(l)]

     def nodetags(self, node):
         '''return the tags associated with a node'''
         if not self.nodetagscache:
             self.nodetagscache = {}
             for t, n in self.tags().iteritems():
                 self.nodetagscache.setdefault(n, []).append(t)
         return self.nodetagscache.get(node, [])

     def _branchtags(self, partial, lrev):
         # TODO: rename this function?
         tiprev = len(self) - 1
         if lrev != tiprev:
             self._updatebranchcache(partial, lrev+1, tiprev+1)
             self._writebranchcache(partial, self.changelog.tip(), tiprev)

         return partial

     def branchmap(self):
         tip = self.changelog.tip()
         if self._branchcache is not None and self._branchcachetip == tip:
             return self._branchcache

         oldtip = self._branchcachetip
         self._branchcachetip = tip
         if oldtip is None or oldtip not in self.changelog.nodemap:
             partial, last, lrev = self._readbranchcache()
         else:
             lrev = self.changelog.rev(oldtip)
             partial = self._branchcache

         self._branchtags(partial, lrev)
         # this private cache holds all heads (not just tips)
         self._branchcache = partial

         return self._branchcache

     def branchtags(self):
         '''return a dict where branch names map to the tipmost head of
         the branch, open heads come before closed'''
         bt = {}
         for bn, heads in self.branchmap().iteritems():
             head = None
             for i in range(len(heads)-1, -1, -1):
                 h = heads[i]
                 if 'close' not in self.changelog.read(h)[5]:
                     head = h
                     break
             # no open heads were found
             if head is None:
                 head = heads[-1]
             bt[bn] = head
         return bt


     def _readbranchcache(self):
         partial = {}
         try:
             f = self.opener("branchheads.cache")
             lines = f.read().split('\n')
             f.close()
         except (IOError, OSError):
             return {}, nullid, nullrev

         try:
             last, lrev = lines.pop(0).split(" ", 1)
             last, lrev = bin(last), int(lrev)
             if lrev >= len(self) or self[lrev].node() != last:
                 # invalidate the cache
                 raise ValueError('invalidating branch cache (tip differs)')
             for l in lines:
                 if not l: continue
                 node, label = l.split(" ", 1)
                 partial.setdefault(label.strip(), []).append(bin(node))
         except KeyboardInterrupt:
             raise
         except Exception, inst:
             if self.ui.debugflag:
                 self.ui.warn(str(inst), '\n')
             partial, last, lrev = {}, nullid, nullrev
         return partial, last, lrev

     def _writebranchcache(self, branches, tip, tiprev):
         try:
             f = self.opener("branchheads.cache", "w", atomictemp=True)
             f.write("%s %s\n" % (hex(tip), tiprev))
             for label, nodes in branches.iteritems():
                 for node in nodes:
                     f.write("%s %s\n" % (hex(node), label))
             f.rename()
         except (IOError, OSError):
             pass

     def _updatebranchcache(self, partial, start, end):
         # collect new branch entries
         newbranches = {}
         for r in xrange(start, end):
             c = self[r]
             newbranches.setdefault(c.branch(), []).append(c.node())
         # if older branchheads are reachable from new ones, they aren't
         # really branchheads. Note checking parents is insufficient:
         # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
         for branch, newnodes in newbranches.iteritems():
             bheads = partial.setdefault(branch, [])
             bheads.extend(newnodes)
             if len(bheads) < 2:
                 continue
             newbheads = []
             # starting from tip means fewer passes over reachable
             while newnodes:
                 latest = newnodes.pop()
                 if latest not in bheads:
                     continue
                 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
                 reachable = self.changelog.reachable(latest, minbhrev)
                 bheads = [b for b in bheads if b not in reachable]
                 newbheads.insert(0, latest)
             bheads.extend(newbheads)
             partial[branch] = bheads

     def lookup(self, key):
         if isinstance(key, int):
             return self.changelog.node(key)
         elif key == '.':
             return self.dirstate.parents()[0]
         elif key == 'null':
             return nullid
         elif key == 'tip':
             return self.changelog.tip()
         n = self.changelog._match(key)
         if n:
             return n
         if key in self.tags():
             return self.tags()[key]
         if key in self.branchtags():
             return self.branchtags()[key]
         n = self.changelog._partialmatch(key)
         if n:
             return n

         # can't find key, check if it might have come from damaged dirstate
         if key in self.dirstate.parents():
             raise error.Abort(_("working directory has unknown parent '%s'!")
                               % short(key))
         try:
             if len(key) == 20:
                 key = hex(key)
         except:
             pass
         raise error.RepoLookupError(_("unknown revision '%s'") % key)

     def local(self):
         return True

     def join(self, f):
         return os.path.join(self.path, f)

     def wjoin(self, f):
         return os.path.join(self.root, f)

     def rjoin(self, f):
         return os.path.join(self.root, util.pconvert(f))

     def file(self, f):
         if f[0] == '/':
             f = f[1:]
         return filelog.filelog(self.sopener, f)

     def changectx(self, changeid):
         return self[changeid]

     def parents(self, changeid=None):
         '''get list of changectxs for parents of changeid'''
         return self[changeid].parents()

     def filectx(self, path, changeid=None, fileid=None):
         """changeid can be a changeset revision, node, or tag.
         fileid can be a file revision or node."""
         return context.filectx(self, path, changeid, fileid)

     def getcwd(self):
         return self.dirstate.getcwd()

     def pathto(self, f, cwd=None):
         return self.dirstate.pathto(f, cwd)

     def wfile(self, f, mode='r'):
         return self.wopener(f, mode)

     def _link(self, f):
         return os.path.islink(self.wjoin(f))

     def _filter(self, filter, filename, data):
         if filter not in self.filterpats:
             l = []
             for pat, cmd in self.ui.configitems(filter):
                 if cmd == '!':
                     continue
                 mf = match_.match(self.root, '', [pat])
                 fn = None
                 params = cmd
                 for name, filterfn in self._datafilters.iteritems():
                     if cmd.startswith(name):
                         fn = filterfn
                         params = cmd[len(name):].lstrip()
                         break
                 if not fn:
                     fn = lambda s, c, **kwargs: util.filter(s, c)
                 # Wrap old filters not supporting keyword arguments
                 if not inspect.getargspec(fn)[2]:
                     oldfn = fn
                     fn = lambda s, c, **kwargs: oldfn(s, c)
                 l.append((mf, fn, params))
             self.filterpats[filter] = l

         for mf, fn, cmd in self.filterpats[filter]:
             if mf(filename):
                 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                 break

         return data

     def adddatafilter(self, name, filter):
         self._datafilters[name] = filter

     def wread(self, filename):
         if self._link(filename):
             data = os.readlink(self.wjoin(filename))
         else:
             data = self.wopener(filename, 'r').read()
         return self._filter("encode", filename, data)

     def wwrite(self, filename, data, flags):
         data = self._filter("decode", filename, data)
         try:
             os.unlink(self.wjoin(filename))
         except OSError:
             pass
         if 'l' in flags:
             self.wopener.symlink(data, filename)
         else:
             self.wopener(filename, 'w').write(data)
             if 'x' in flags:
                 util.set_flags(self.wjoin(filename), False, True)

     def wwritedata(self, filename, data):
         return self._filter("decode", filename, data)

     def transaction(self):
         tr = self._transref and self._transref() or None
         if tr and tr.running():
             return tr.nest()

         # abort here if the journal already exists
         if os.path.exists(self.sjoin("journal")):
             raise error.RepoError(_("abandoned transaction found - run hg recover"))

         # save dirstate for rollback
         try:
             ds = self.opener("dirstate").read()
         except IOError:
             ds = ""
         self.opener("journal.dirstate", "w").write(ds)
         self.opener("journal.branch", "w").write(self.dirstate.branch())

         renames = [(self.sjoin("journal"), self.sjoin("undo")),
                    (self.join("journal.dirstate"), self.join("undo.dirstate")),
                    (self.join("journal.branch"), self.join("undo.branch"))]
         tr = transaction.transaction(self.ui.warn, self.sopener,
                                      self.sjoin("journal"),
                                      aftertrans(renames),
                                      self.store.createmode)
         self._transref = weakref.ref(tr)
         return tr

     def recover(self):
         lock = self.lock()
         try:
             if os.path.exists(self.sjoin("journal")):
                 self.ui.status(_("rolling back interrupted transaction\n"))
                 transaction.rollback(self.sopener, self.sjoin("journal"), self.ui.warn)
                 self.invalidate()
                 return True
             else:
                 self.ui.warn(_("no interrupted transaction available\n"))
                 return False
         finally:
             lock.release()

     def rollback(self):
         wlock = lock = None
         try:
             wlock = self.wlock()
             lock = self.lock()
             if os.path.exists(self.sjoin("undo")):
                 self.ui.status(_("rolling back last transaction\n"))
                 transaction.rollback(self.sopener, self.sjoin("undo"), self.ui.warn)
                 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                 try:
                     branch = self.opener("undo.branch").read()
                     self.dirstate.setbranch(branch)
                 except IOError:
                     self.ui.warn(_("Named branch could not be reset, "
                                    "current branch still is: %s\n")
                                  % encoding.tolocal(self.dirstate.branch()))
                 self.invalidate()
                 self.dirstate.invalidate()
                 self.destroyed()
             else:
                 self.ui.warn(_("no rollback information available\n"))
         finally:
             release(lock, wlock)

     def invalidate(self):
         for a in "changelog manifest".split():
             if a in self.__dict__:
                 delattr(self, a)
         self._tags = None
         self._tagtypes = None
         self.nodetagscache = None
         self._branchcache = None # in UTF-8
         self._branchcachetip = None

     def _lock(self, lockname, wait, releasefn, acquirefn, desc):
         try:
             l = lock.lock(lockname, 0, releasefn, desc=desc)
         except error.LockHeld, inst:
             if not wait:
                 raise
             self.ui.warn(_("waiting for lock on %s held by %r\n") %
                          (desc, inst.locker))
             # default to 600 seconds timeout
             l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                           releasefn, desc=desc)
         if acquirefn:
             acquirefn()
         return l

     def lock(self, wait=True):
         '''Lock the repository store (.hg/store) and return a weak reference
         to the lock. Use this before modifying the store (e.g. committing or
         stripping). If you are opening a transaction, get a lock as well.)'''
         l = self._lockref and self._lockref()
         if l is not None and l.held:
             l.lock()
             return l

         l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                        _('repository %s') % self.origroot)
         self._lockref = weakref.ref(l)
         return l

     def wlock(self, wait=True):
         '''Lock the non-store parts of the repository (everything under
         .hg except .hg/store) and return a weak reference to the lock.
         Use this before modifying files in .hg.'''
         l = self._wlockref and self._wlockref()
         if l is not None and l.held:
             l.lock()
             return l

         l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                        self.dirstate.invalidate, _('working directory of %s') %
                        self.origroot)
         self._wlockref = weakref.ref(l)
         return l

     def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
         """
         commit an individual file as part of a larger transaction
         """

         fname = fctx.path()
         text = fctx.data()
         flog = self.file(fname)
         fparent1 = manifest1.get(fname, nullid)
         fparent2 = fparent2o = manifest2.get(fname, nullid)

         meta = {}
         copy = fctx.renamed()
         if copy and copy[0] != fname:
             # Mark the new revision of this file as a copy of another
             # file. This copy data will effectively act as a parent
             # of this new revision. If this is a merge, the first
             # parent will be the nullid (meaning "look up the copy data")
             # and the second one will be the other parent. For example:
             #
             # 0 --- 1 --- 3   rev1 changes file foo
             #   \       /     rev2 renames foo to bar and changes it
             #    \- 2 -/      rev3 should have bar with all changes and
             #                      should record that bar descends from
             #                      bar in rev2 and foo in rev1
             #
             # this allows this merge to succeed:
             #
             # 0 --- 1 --- 3   rev4 reverts the content change from rev2
             #   \       /     merging rev3 and rev4 should use bar@rev2
             #    \- 2 --- 4        as the merge base
             #

             cfname = copy[0]
             crev = manifest1.get(cfname)
             newfparent = fparent2

             if manifest2: # branch merge
                 if fparent2 == nullid or crev is None: # copied on remote side
                     if cfname in manifest2:
                         crev = manifest2[cfname]
                         newfparent = fparent1

             # find source in nearest ancestor if we've lost track
             if not crev:
                 self.ui.debug(" %s: searching for copy revision for %s\n" %
                               (fname, cfname))
                 for ancestor in self['.'].ancestors():
                     if cfname in ancestor:
                         crev = ancestor[cfname].filenode()
                         break

             self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
             meta["copy"] = cfname
             meta["copyrev"] = hex(crev)
             fparent1, fparent2 = nullid, newfparent
         elif fparent2 != nullid:
             # is one parent an ancestor of the other?
             fparentancestor = flog.ancestor(fparent1, fparent2)
             if fparentancestor == fparent1:
                 fparent1, fparent2 = fparent2, nullid
             elif fparentancestor == fparent2:
                 fparent2 = nullid

         # is the file changed?
         if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
             changelist.append(fname)
             return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

         # are just the flags changed during merge?
         if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
             changelist.append(fname)

         return fparent1

     def commit(self, text="", user=None, date=None, match=None, force=False,
                editor=False, extra={}):
         """Add a new revision to current repository.

         Revision information is gathered from the working directory,
         match can be used to filter the committed files. If editor is
         supplied, it is called to get a commit message.
         """

         def fail(f, msg):
             raise util.Abort('%s: %s' % (f, msg))

         if not match:
             match = match_.always(self.root, '')

         if not force:
             vdirs = []
             match.dir = vdirs.append
             match.bad = fail

         wlock = self.wlock()
         try:
             p1, p2 = self.dirstate.parents()
             wctx = self[None]

             if (not force and p2 != nullid and match and
                 (match.files() or match.anypats())):
                 raise util.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

             changes = self.status(match=match, clean=force)
             if force:
                 changes[0].extend(changes[6]) # mq may commit unchanged files

             # check subrepos
             subs = []
             for s in wctx.substate:
                 if match(s) and wctx.sub(s).dirty():
                     subs.append(s)
             if subs and '.hgsubstate' not in changes[0]:
                 changes[0].insert(0, '.hgsubstate')

             # make sure all explicit patterns are matched
             if not force and match.files():
                 matched = set(changes[0] + changes[1] + changes[2])

                 for f in match.files():
                     if f == '.' or f in matched or f in wctx.substate:
                         continue
                     if f in changes[3]: # missing
                         fail(f, _('file not found!'))
                     if f in vdirs: # visited directory
                         d = f + '/'
                         for mf in matched:
                             if mf.startswith(d):
                                 break
                         else:
                             fail(f, _("no match under directory!"))
                     elif f not in self.dirstate:
                         fail(f, _("file not tracked!"))

             if (not force and not extra.get("close") and p2 == nullid
                 and not (changes[0] or changes[1] or changes[2])
                 and self[None].branch() == self['.'].branch()):
                 return None

             ms = merge_.mergestate(self)
             for f in changes[0]:
                 if f in ms and ms[f] == 'u':
                     raise util.Abort(_("unresolved merge conflicts "
                                        "(see hg resolve)"))

             cctx = context.workingctx(self, (p1, p2), text, user, date,
                                       extra, changes)
             if editor:
                 cctx._text = editor(self, cctx, subs)

             # commit subs
             if subs:
                 state = wctx.substate.copy()
                 for s in subs:
                     self.ui.status(_('committing subrepository %s\n') % s)
                     sr = wctx.sub(s).commit(cctx._text, user, date)
                     state[s] = (state[s][0], sr)
                 subrepo.writestate(self, state)

+            # Save commit message in case this transaction gets rolled back
+            # (e.g. by a pretxncommit hook).  (Save in text mode in case a
+            # Windows user wants to edit it with Notepad.  Normalize
+            # trailing whitespace so the file always looks the same --
+            # makes testing easier.)
+            msgfile = self.opener('last-message.txt', 'w')
+            msgfile.write(cctx._text.rstrip() + '\n')
+            msgfile.close()
+
838 ret = self.commitctx(cctx, True)
847 ret = self.commitctx(cctx, True)
839
848
840 # update dirstate and mergestate
849 # update dirstate and mergestate
841 for f in changes[0] + changes[1]:
850 for f in changes[0] + changes[1]:
842 self.dirstate.normal(f)
851 self.dirstate.normal(f)
843 for f in changes[2]:
852 for f in changes[2]:
844 self.dirstate.forget(f)
853 self.dirstate.forget(f)
845 self.dirstate.setparents(ret)
854 self.dirstate.setparents(ret)
846 ms.reset()
855 ms.reset()
847
856
848 return ret
857 return ret
849
858
850 finally:
859 finally:
851 wlock.release()
860 wlock.release()
852
861
853 def commitctx(self, ctx, error=False):
862 def commitctx(self, ctx, error=False):
854 """Add a new revision to current repository.
863 """Add a new revision to current repository.
855
864
856 Revision information is passed via the context argument.
865 Revision information is passed via the context argument.
857 """
866 """
858
867
859 tr = lock = None
868 tr = lock = None
860 removed = ctx.removed()
869 removed = ctx.removed()
861 p1, p2 = ctx.p1(), ctx.p2()
870 p1, p2 = ctx.p1(), ctx.p2()
862 m1 = p1.manifest().copy()
871 m1 = p1.manifest().copy()
863 m2 = p2.manifest()
872 m2 = p2.manifest()
864 user = ctx.user()
873 user = ctx.user()
865
874
866 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
875 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
867 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
876 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
868
877
869 lock = self.lock()
878 lock = self.lock()
870 try:
879 try:
871 tr = self.transaction()
880 tr = self.transaction()
872 trp = weakref.proxy(tr)
881 trp = weakref.proxy(tr)
873
882
874 # check in files
883 # check in files
875 new = {}
884 new = {}
876 changed = []
885 changed = []
877 linkrev = len(self)
886 linkrev = len(self)
878 for f in sorted(ctx.modified() + ctx.added()):
887 for f in sorted(ctx.modified() + ctx.added()):
879 self.ui.note(f + "\n")
888 self.ui.note(f + "\n")
880 try:
889 try:
881 fctx = ctx[f]
890 fctx = ctx[f]
882 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
891 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
883 changed)
892 changed)
884 m1.set(f, fctx.flags())
893 m1.set(f, fctx.flags())
885 except (OSError, IOError):
894 except (OSError, IOError):
886 if error:
895 if error:
887 self.ui.warn(_("trouble committing %s!\n") % f)
896 self.ui.warn(_("trouble committing %s!\n") % f)
888 raise
897 raise
889 else:
898 else:
890 removed.append(f)
899 removed.append(f)
891
900
892 # update manifest
901 # update manifest
893 m1.update(new)
902 m1.update(new)
894 removed = [f for f in sorted(removed) if f in m1 or f in m2]
903 removed = [f for f in sorted(removed) if f in m1 or f in m2]
895 drop = [f for f in removed if f in m1]
904 drop = [f for f in removed if f in m1]
896 for f in drop:
905 for f in drop:
897 del m1[f]
906 del m1[f]
898 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
907 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
899 p2.manifestnode(), (new, drop))
908 p2.manifestnode(), (new, drop))
900
909
901 # update changelog
910 # update changelog
902 self.changelog.delayupdate()
911 self.changelog.delayupdate()
903 n = self.changelog.add(mn, changed + removed, ctx.description(),
912 n = self.changelog.add(mn, changed + removed, ctx.description(),
904 trp, p1.node(), p2.node(),
913 trp, p1.node(), p2.node(),
905 user, ctx.date(), ctx.extra().copy())
914 user, ctx.date(), ctx.extra().copy())
906 p = lambda: self.changelog.writepending() and self.root or ""
915 p = lambda: self.changelog.writepending() and self.root or ""
907 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
916 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
908 parent2=xp2, pending=p)
917 parent2=xp2, pending=p)
909 self.changelog.finalize(trp)
918 self.changelog.finalize(trp)
910 tr.close()
919 tr.close()
911
920
912 if self._branchcache:
921 if self._branchcache:
913 self.branchtags()
922 self.branchtags()
914
923
915 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
924 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
916 return n
925 return n
917 finally:
926 finally:
918 del tr
927 del tr
919 lock.release()
928 lock.release()

    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        tags_.findglobaltags(self.ui, self, {}, {})

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or match_.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            s = self.dirstate.status(match, listignored, listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f].data())):
                        modified.append(f)
                    else:
                        fixup.append(f)

                if listclean:
                    clean += fixup

                # update dirstate for files that are actually clean
                if fixup:
                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean
        for l in r:
            l.sort()
        return r

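    # Usage sketch (hypothetical caller, not from this file): the seven
    # lists unpack positionally in the order built above:
    #
    #   modified, added, removed, deleted, unknown, ignored, clean = \
    #       repo.status(ignored=True, clean=True, unknown=True)
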
    def add(self, list):
        wlock = self.wlock()
        try:
            rejected = []
            for f in list:
                p = self.wjoin(f)
                try:
                    st = os.lstat(p)
                except OSError:
                    self.ui.warn(_("%s does not exist!\n") % f)
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    self.ui.warn(_("%s: files over 10MB may cause memory and"
                                   " performance problems\n"
                                   "(use 'hg revert %s' to unadd the file)\n")
                                   % (f, f))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    self.ui.warn(_("%s not added: only files and symlinks "
                                   "supported currently\n") % f)
                    rejected.append(f)
                elif self.dirstate[f] in 'amn':
                    self.ui.warn(_("%s already tracked!\n") % f)
                elif self.dirstate[f] == 'r':
                    self.dirstate.normallookup(f)
                else:
                    self.dirstate.add(f)
            return rejected
        finally:
            wlock.release()

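    # For example (hypothetical caller): add() returns the files it could
    # not schedule, so a caller might report them:
    #
    #   rejected = repo.add(['foo', 'bar'])
    #   if rejected:
    #       ui.warn(_("not added: %r\n") % rejected)
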
    def forget(self, list):
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'a':
                    self.ui.warn(_("%s not added!\n") % f)
                else:
                    self.dirstate.forget(f)
        finally:
            wlock.release()

    def remove(self, list, unlink=False):
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    if inst.errno != errno.ENOENT:
                        raise
        wlock = self.wlock()
        try:
            for f in list:
                if unlink and os.path.exists(self.wjoin(f)):
                    self.ui.warn(_("%s still exists!\n") % f)
                elif self.dirstate[f] == 'a':
                    self.dirstate.forget(f)
                elif f not in self.dirstate:
                    self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    self.dirstate.remove(f)
        finally:
            wlock.release()

    def undelete(self, list):
        manifests = [self.manifest.read(self.changelog.read(p)[0])
                     for p in self.dirstate.parents() if p != nullid]
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'r':
                    self.ui.warn(_("%s not removed!\n") % f)
                else:
                    m = f in manifests[0] and manifests[0] or manifests[1]
                    t = self.file(f).read(m[f])
                    self.wwrite(f, t, m.flags(f))
                    self.dirstate.normal(f)
        finally:
            wlock.release()

    def copy(self, source, dest):
        p = self.wjoin(dest)
        if not (os.path.exists(p) or os.path.islink(p)):
            self.ui.warn(_("%s does not exist!\n") % dest)
        elif not (os.path.isfile(p) or os.path.islink(p)):
            self.ui.warn(_("copy failed: %s is not a file or a "
                           "symbolic link\n") % dest)
        else:
            wlock = self.wlock()
            try:
                if self.dirstate[dest] in '?r':
                    self.dirstate.add(dest)
                self.dirstate.copy(source, dest)
            finally:
                wlock.release()

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads]
        return [n for (r, n) in sorted(heads)]

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

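    # Usage sketch (assumed caller, not from this file): list open heads of
    # a branch, newest first, as revision numbers:
    #
    #   for h in repo.branchheads('default'):
    #       ui.write("%d:%s\n" % (repo[h].rev(), short(h)))
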
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

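    # Informal example of the sampling above: for a (top, bottom) pair 40
    # changesets apart, the returned list holds the nodes at distances
    # 1, 2, 4, 8, 16 and 32 from top. Doubling the stride is what lets
    # findcommonincoming() below narrow a branch range in O(log n) rounds.
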
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exist
        in self and remote but have no children in self and remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote,
        see outgoing)
        """
        return self.findcommonincoming(remote, base, heads, force)[1]

    def findcommonincoming(self, remote, base=None, heads=None, force=False):
        """Return a tuple (common, missing roots, heads) used to identify
        missing nodes from remote.

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exist
        in self and remote but have no children in self and remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        """
        m = self.changelog.nodemap
        search = []
        fetch = set()
        seen = set()
        seenbranch = set()
        if base is None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid], [nullid], list(heads)
            return [nullid], [], []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        heads = unknown
        if not unknown:
            return base.keys(), [], []

        req = set(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug("examining %s:%s\n"
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug("branch already found\n")
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug("found incomplete branch %s:%s\n"
                                  % (short(n[0]), short(n[1])))
                    search.append(n[0:2]) # schedule branch range for scanning
                    seenbranch.add(n)
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug("found new changeset %s\n" %
                                          short(n[1]))
                            fetch.add(n[1]) # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req.add(p)
                seen.add(n[0])

            if r:
                reqcnt += 1
                self.ui.debug("request %d: %s\n" %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug("received %s:%s\n" %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            newsearch = []
            reqcnt += 1
            for n, l in zip(search, remote.between(search)):
                l.append(n[1])
                p = n[0]
                f = 1
                for i in l:
                    self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
                    if i in m:
                        if f <= 2:
                            self.ui.debug("found new branch changeset %s\n" %
                                          short(p))
                            fetch.add(p)
                            base[i] = 1
                        else:
                            self.ui.debug("narrowed branch search to %s:%s\n"
                                          % (short(p), short(i)))
                            newsearch.append((p, i))
                        break
                    p, f = i, f * 2
            search = newsearch

        # sanity check our fetch list
        for f in fetch:
            if f in m:
                raise error.RepoError(_("already have changeset ")
                                      + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug("found new changesets starting at " +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug("%d total queries\n" % reqcnt)

        return base.keys(), list(fetch), heads

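    # Pull-side sketch (names assumed, not from this file):
    #
    #   common, fetch, rheads = repo.findcommonincoming(other)
    #   # fetch holds the roots of what we lack; [nullid] means everything
    #   if fetch == [nullid]:
    #       ui.status(_("requesting all changes\n"))
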
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base is None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug("common changesets up to "
                      + " ".join(map(short, base.keys())) + "\n")

        remain = set(self.changelog.nodemap)

        # prune everything remote has from the tree
        remain.remove(nullid)
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                remain.remove(n)
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = set()
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads.add(p1)
                if p2 in heads:
                    updated_heads.add(p2)

        # this is the set of all roots we have to push
        if heads:
            return subset, list(updated_heads)
        else:
            return subset

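    # Push-side counterpart (illustrative only), mirroring what prepush()
    # does below: seed `base` via findincoming(), then compute the roots
    # the remote lacks:
    #
    #   base = {}
    #   repo.findincoming(other, base, other.heads())
    #   roots = repo.findoutgoing(other, base)
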
    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
                                                            force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None and remote.capable('changegroupsubset'):
                heads = rheads

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if not remote.capable('changegroupsubset'):
                    raise util.Abort(_("Partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            lock.release()

    def push(self, remote, force=False, revs=None):
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

    def prepush(self, remote, force, revs):
        '''Analyze the local and remote repositories and determine which
        changesets need to be pushed to the remote. Return a tuple
        (changegroup, remoteheads). changegroup is a readable file-like
        object whose read() returns successive changegroup chunks ready to
        be sent over the wire. remoteheads is the list of remote heads.
        '''
        common = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, common, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, common, remote_heads)
        msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)

        def checkbranch(lheads, rheads, updatelb):
            '''
            check whether there are more local heads than remote heads on
            a specific branch.

            lheads: local branch heads
            rheads: remote branch heads
            updatelb: outgoing local branch bases
            '''

            warn = 0

            if not revs and len(lheads) > len(rheads):
                warn = 1
            else:
                # add local heads involved in the push
                updatelheads = [self.changelog.heads(x, lheads)
                                for x in updatelb]
                newheads = set(sum(updatelheads, [])) & set(lheads)

                if not newheads:
                    return True

                # add heads we don't have or that are not involved in the push
                for r in rheads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.add(r)
                    else:
                        newheads.add(r)
                if len(newheads) > len(rheads):
                    warn = 1

            if warn:
                if not rheads: # new branch requires --force
                    self.ui.warn(_("abort: push creates new"
                                   " remote branch '%s'!\n") %
                                 self[updatelb[0]].branch())
                else:
                    self.ui.warn(_("abort: push creates new remote heads!\n"))

                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return False
            return True

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # Check for each named branch if we're creating new remote heads.
            # To be a remote head after push, node must be either:
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head
            #
            # New named branches cannot be created without --force.

            if remote_heads != [nullid]:
                if remote.capable('branchmap'):
                    localhds = {}
                    if not revs:
                        localhds = self.branchmap()
                    else:
                        for n in heads:
                            branch = self[n].branch()
                            if branch in localhds:
                                localhds[branch].append(n)
                            else:
                                localhds[branch] = [n]

                    remotehds = remote.branchmap()

                    for lh in localhds:
                        if lh in remotehds:
                            rheads = remotehds[lh]
                        else:
                            rheads = []
                        lheads = localhds[lh]
                        updatelb = [upd for upd in update
                                    if self[upd].branch() == lh]
                        if not updatelb:
                            continue
                        if not checkbranch(lheads, rheads, updatelb):
                            return None, 0
                else:
                    if not checkbranch(heads, remote_heads, update):
                        return None, 0

        if inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))

        if revs is None:
            # use the fast path, no race possible on push
            nodes = self.changelog.findmissing(common.keys())
            cg = self._changegroup(nodes, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads

    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()
        try:
            ret = self.prepush(remote, force, revs)
            if ret[0] is not None:
                cg, remote_heads = ret
                return remote.addchangegroup(cg, 'push', self.url())
            return ret[1]
        finally:
            lock.release()

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push. once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force:
                remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument. It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.
        """

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        if not bases:
            bases = [nullid]
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)

        if extranodes is None:
            # can we go through the fast path?
            heads.sort()
            allheads = self.heads()
            allheads.sort()
            if heads == allheads:
                return self._changegroup(msng_cl_lst, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)

        self.changegroupinfo(msng_cl_lst, source)
        # Some bases may turn out to be superfluous, and some heads may be
        # too. nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = set()
        # We assume that all parents of bases are known heads.
        for n in bases:
            knownheads.update(cl.parents(n))
        knownheads.discard(nullid)
        knownheads = list(knownheads)
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known. The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into a set.
            has_cl_set = set(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = set()

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents. This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = list(hasset)
            haslst.sort(key=revlog.rev)
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset.add(n)
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

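        # Worked example (informal): if the recipient is known to have
        # filenode C in a chain A -> B -> C, prune_parents() adds A and B
        # to hasset as well and drops all three from the missing set.
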
        # This is a function-generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup. The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and the total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = set()
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
                if linknode in has_cl_set:
                    has_mnfst_set.add(n)
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

1753 # A function generating function that sets up the initial environment
1762 # A function generating function that sets up the initial environment
1754 # the inner function.
1763 # the inner function.
1755 def filenode_collector(changedfiles):
1764 def filenode_collector(changedfiles):
1756 next_rev = [0]
1765 next_rev = [0]
1757 # This gathers information from each manifestnode included in the
1766 # This gathers information from each manifestnode included in the
1758 # changegroup about which filenodes the manifest node references
1767 # changegroup about which filenodes the manifest node references
1759 # so we can include those in the changegroup too.
1768 # so we can include those in the changegroup too.
1760 #
1769 #
1761 # It also remembers which changenode each filenode belongs to. It
1770 # It also remembers which changenode each filenode belongs to. It
1762 # does this by assuming the a filenode belongs to the changenode
1771 # does this by assuming the a filenode belongs to the changenode
1763 # the first manifest that references it belongs to.
1772 # the first manifest that references it belongs to.
1764 def collect_msng_filenodes(mnfstnode):
1773 def collect_msng_filenodes(mnfstnode):
1765 r = mnfst.rev(mnfstnode)
1774 r = mnfst.rev(mnfstnode)
1766 if r == next_rev[0]:
1775 if r == next_rev[0]:
1767 # If the last rev we looked at was the one just previous,
1776 # If the last rev we looked at was the one just previous,
1768 # we only need to see a diff.
1777 # we only need to see a diff.
1769 deltamf = mnfst.readdelta(mnfstnode)
1778 deltamf = mnfst.readdelta(mnfstnode)
1770 # For each line in the delta
1779 # For each line in the delta
1771 for f, fnode in deltamf.iteritems():
1780 for f, fnode in deltamf.iteritems():
1772 f = changedfiles.get(f, None)
1781 f = changedfiles.get(f, None)
1773 # And if the file is in the list of files we care
1782 # And if the file is in the list of files we care
1774 # about.
1783 # about.
1775 if f is not None:
1784 if f is not None:
1776 # Get the changenode this manifest belongs to
1785 # Get the changenode this manifest belongs to
1777 clnode = msng_mnfst_set[mnfstnode]
1786 clnode = msng_mnfst_set[mnfstnode]
1778 # Create the set of filenodes for the file if
1787 # Create the set of filenodes for the file if
1779 # there isn't one already.
1788 # there isn't one already.
1780 ndset = msng_filenode_set.setdefault(f, {})
1789 ndset = msng_filenode_set.setdefault(f, {})
1781 # And set the filenode's changelog node to the
1790 # And set the filenode's changelog node to the
1782 # manifest's if it hasn't been set already.
1791 # manifest's if it hasn't been set already.
1783 ndset.setdefault(fnode, clnode)
1792 ndset.setdefault(fnode, clnode)
1784 else:
1793 else:
1785 # Otherwise we need a full manifest.
1794 # Otherwise we need a full manifest.
1786 m = mnfst.read(mnfstnode)
1795 m = mnfst.read(mnfstnode)
1787 # For every file in we care about.
1796 # For every file in we care about.
1788 for f in changedfiles:
1797 for f in changedfiles:
1789 fnode = m.get(f, None)
1798 fnode = m.get(f, None)
1790 # If it's in the manifest
1799 # If it's in the manifest
1791 if fnode is not None:
1800 if fnode is not None:
1792 # See comments above.
1801 # See comments above.
1793 clnode = msng_mnfst_set[mnfstnode]
1802 clnode = msng_mnfst_set[mnfstnode]
1794 ndset = msng_filenode_set.setdefault(f, {})
1803 ndset = msng_filenode_set.setdefault(f, {})
1795 ndset.setdefault(fnode, clnode)
1804 ndset.setdefault(fnode, clnode)
1796 # Remember the revision we hope to see next.
1805 # Remember the revision we hope to see next.
1797 next_rev[0] = r + 1
1806 next_rev[0] = r + 1
1798 return collect_msng_filenodes
1807 return collect_msng_filenodes
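Note the next_rev = [0] one-element list above: Python 2 has no 'nonlocal', so the inner function cannot rebind a name in the enclosing scope, but it can mutate an object reached through one. A minimal standalone sketch of that pattern (the names here are illustrative, not Mercurial API):

    def make_tracker():
        # One-element list as a mutable cell shared with the closure.
        expected = [0]
        def saw(rev):
            sequential = (rev == expected[0])  # cheap delta path?
            expected[0] = rev + 1              # rev we hope to see next
            return sequential
        return saw

    saw = make_tracker()
    assert saw(0) and saw(1)  # consecutive revs take the delta path
    assert not saw(5)         # a gap forces reading the full manifest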
1799
1808
1800 # We have a list of filenodes we think we need for a file; let's remove
1809 # We have a list of filenodes we think we need for a file; let's remove
1801 # all those we know the recipient must have.
1810 # all those we know the recipient must have.
1802 def prune_filenodes(f, filerevlog):
1811 def prune_filenodes(f, filerevlog):
1803 msngset = msng_filenode_set[f]
1812 msngset = msng_filenode_set[f]
1804 hasset = set()
1813 hasset = set()
1805 # If a 'missing' filenode thinks it belongs to a changenode we
1814 # If a 'missing' filenode thinks it belongs to a changenode we
1806 # assume the recipient must have, then the recipient must have
1815 # assume the recipient must have, then the recipient must have
1807 # that filenode.
1816 # that filenode.
1808 for n in msngset:
1817 for n in msngset:
1809 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1818 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1810 if clnode in has_cl_set:
1819 if clnode in has_cl_set:
1811 hasset.add(n)
1820 hasset.add(n)
1812 prune_parents(filerevlog, hasset, msngset)
1821 prune_parents(filerevlog, hasset, msngset)
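The pruning above leans on linkrev(): every filelog revision records the changelog revision that introduced it, so one lookup maps a filenode back to a changeset. A hypothetical sketch with plain dicts standing in for the revlogs (linkednode, has_cl_set, and msngset below are illustrative stand-ins, not the real objects):

    # filenode -> changelog node that introduced it (stand-in for linkrev)
    linkednode = {'fn1': 'c1', 'fn2': 'c2', 'fn3': 'c3'}
    has_cl_set = set(['c1', 'c2'])        # changesets the recipient has
    msngset = {'fn1': 'c1', 'fn3': 'c3'}  # filenodes we think are missing

    hasset = set(n for n in msngset if linkednode[n] in has_cl_set)
    for n in hasset:
        del msngset[n]                    # recipient already has these
    assert msngset == {'fn3': 'c3'}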
1813
1822
1814 # A function generating function that sets up a context for the
1823 # A function generating function that sets up a context for the
1815 # inner function.
1824 # inner function.
1816 def lookup_filenode_link_func(fname):
1825 def lookup_filenode_link_func(fname):
1817 msngset = msng_filenode_set[fname]
1826 msngset = msng_filenode_set[fname]
1818 # Lookup the changenode the filenode belongs to.
1827 # Lookup the changenode the filenode belongs to.
1819 def lookup_filenode_link(fnode):
1828 def lookup_filenode_link(fnode):
1820 return msngset[fnode]
1829 return msngset[fnode]
1821 return lookup_filenode_link
1830 return lookup_filenode_link
1822
1831
1823 # Add the nodes that were explicitly requested.
1832 # Add the nodes that were explicitly requested.
1824 def add_extra_nodes(name, nodes):
1833 def add_extra_nodes(name, nodes):
1825 if not extranodes or name not in extranodes:
1834 if not extranodes or name not in extranodes:
1826 return
1835 return
1827
1836
1828 for node, linknode in extranodes[name]:
1837 for node, linknode in extranodes[name]:
1829 if node not in nodes:
1838 if node not in nodes:
1830 nodes[node] = linknode
1839 nodes[node] = linknode
1831
1840
1832 # Now that we have all these utility functions to help out and
1841 # Now that we have all these utility functions to help out and
1833 # logically divide up the task, generate the group.
1842 # logically divide up the task, generate the group.
1834 def gengroup():
1843 def gengroup():
1835 # The set of changed files starts empty.
1844 # The set of changed files starts empty.
1836 changedfiles = {}
1845 changedfiles = {}
1837 # Create a changenode group generator that will call our functions
1846 # Create a changenode group generator that will call our functions
1838 # back to lookup the owning changenode and collect information.
1847 # back to lookup the owning changenode and collect information.
1839 group = cl.group(msng_cl_lst, identity,
1848 group = cl.group(msng_cl_lst, identity,
1840 manifest_and_file_collector(changedfiles))
1849 manifest_and_file_collector(changedfiles))
1841 for chnk in group:
1850 for chnk in group:
1842 yield chnk
1851 yield chnk
1843
1852
1844 # The list of manifests has been collected by the generator
1853 # The list of manifests has been collected by the generator
1845 # calling our functions back.
1854 # calling our functions back.
1846 prune_manifests()
1855 prune_manifests()
1847 add_extra_nodes(1, msng_mnfst_set)
1856 add_extra_nodes(1, msng_mnfst_set)
1848 msng_mnfst_lst = msng_mnfst_set.keys()
1857 msng_mnfst_lst = msng_mnfst_set.keys()
1849 # Sort the manifestnodes by revision number.
1858 # Sort the manifestnodes by revision number.
1850 msng_mnfst_lst.sort(key=mnfst.rev)
1859 msng_mnfst_lst.sort(key=mnfst.rev)
1851 # Create a generator for the manifestnodes that calls our lookup
1860 # Create a generator for the manifestnodes that calls our lookup
1852 # and data collection functions back.
1861 # and data collection functions back.
1853 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1862 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1854 filenode_collector(changedfiles))
1863 filenode_collector(changedfiles))
1855 for chnk in group:
1864 for chnk in group:
1856 yield chnk
1865 yield chnk
1857
1866
1858 # These are no longer needed, dereference and toss the memory for
1867 # These are no longer needed, dereference and toss the memory for
1859 # them.
1868 # them.
1860 msng_mnfst_lst = None
1869 msng_mnfst_lst = None
1861 msng_mnfst_set.clear()
1870 msng_mnfst_set.clear()
1862
1871
1863 if extranodes:
1872 if extranodes:
1864 for fname in extranodes:
1873 for fname in extranodes:
1865 if isinstance(fname, int):
1874 if isinstance(fname, int):
1866 continue
1875 continue
1867 msng_filenode_set.setdefault(fname, {})
1876 msng_filenode_set.setdefault(fname, {})
1868 changedfiles[fname] = 1
1877 changedfiles[fname] = 1
1869 # Go through all our files in order sorted by name.
1878 # Go through all our files in order sorted by name.
1870 for fname in sorted(changedfiles):
1879 for fname in sorted(changedfiles):
1871 filerevlog = self.file(fname)
1880 filerevlog = self.file(fname)
1872 if not len(filerevlog):
1881 if not len(filerevlog):
1873 raise util.Abort(_("empty or missing revlog for %s") % fname)
1882 raise util.Abort(_("empty or missing revlog for %s") % fname)
1874 # Toss out the filenodes that the recipient isn't really
1883 # Toss out the filenodes that the recipient isn't really
1875 # missing.
1884 # missing.
1876 if fname in msng_filenode_set:
1885 if fname in msng_filenode_set:
1877 prune_filenodes(fname, filerevlog)
1886 prune_filenodes(fname, filerevlog)
1878 add_extra_nodes(fname, msng_filenode_set[fname])
1887 add_extra_nodes(fname, msng_filenode_set[fname])
1879 msng_filenode_lst = msng_filenode_set[fname].keys()
1888 msng_filenode_lst = msng_filenode_set[fname].keys()
1880 else:
1889 else:
1881 msng_filenode_lst = []
1890 msng_filenode_lst = []
1882 # If any filenodes are left, generate the group for them,
1891 # If any filenodes are left, generate the group for them,
1883 # otherwise don't bother.
1892 # otherwise don't bother.
1884 if len(msng_filenode_lst) > 0:
1893 if len(msng_filenode_lst) > 0:
1885 yield changegroup.chunkheader(len(fname))
1894 yield changegroup.chunkheader(len(fname))
1886 yield fname
1895 yield fname
1887 # Sort the filenodes by their revision number.
1896 # Sort the filenodes by their revision number.
1888 msng_filenode_lst.sort(key=filerevlog.rev)
1897 msng_filenode_lst.sort(key=filerevlog.rev)
1889 # Create a group generator and only pass in a changenode
1898 # Create a group generator and only pass in a changenode
1890 # lookup function as we need to collect no information
1899 # lookup function as we need to collect no information
1891 # from filenodes.
1900 # from filenodes.
1892 group = filerevlog.group(msng_filenode_lst,
1901 group = filerevlog.group(msng_filenode_lst,
1893 lookup_filenode_link_func(fname))
1902 lookup_filenode_link_func(fname))
1894 for chnk in group:
1903 for chnk in group:
1895 yield chnk
1904 yield chnk
1896 if fname in msng_filenode_set:
1905 if fname in msng_filenode_set:
1897 # Don't need this anymore, toss it to free memory.
1906 # Don't need this anymore, toss it to free memory.
1898 del msng_filenode_set[fname]
1907 del msng_filenode_set[fname]
1899 # Signal that no more groups are left.
1908 # Signal that no more groups are left.
1900 yield changegroup.closechunk()
1909 yield changegroup.closechunk()
1901
1910
1902 if msng_cl_lst:
1911 if msng_cl_lst:
1903 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1912 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1904
1913
1905 return util.chunkbuffer(gengroup())
1914 return util.chunkbuffer(gengroup())
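The util.chunkbuffer wrapper turns gengroup()'s chunk generator into a file-like object, so callers stream the changegroup with read() instead of iterating. A hedged consumer sketch, assuming only that read() returns an empty string at end of stream, as file-like objects do:

    def drain(cg, blocksize=4096):
        # Pull fixed-size blocks from the changegroup until exhausted;
        # a caller such as the wire protocol would forward each block.
        total = 0
        while True:
            block = cg.read(blocksize)
            if not block:
                break
            total += len(block)
        return total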
1906
1915
1907 def changegroup(self, basenodes, source):
1916 def changegroup(self, basenodes, source):
1908 # to avoid a race we use changegroupsubset() (issue1320)
1917 # to avoid a race we use changegroupsubset() (issue1320)
1909 return self.changegroupsubset(basenodes, self.heads(), source)
1918 return self.changegroupsubset(basenodes, self.heads(), source)
1910
1919
1911 def _changegroup(self, nodes, source):
1920 def _changegroup(self, nodes, source):
1912 """Compute the changegroup of all nodes that we have that a recipient
1921 """Compute the changegroup of all nodes that we have that a recipient
1913 doesn't. Return a chunkbuffer object whose read() method will return
1922 doesn't. Return a chunkbuffer object whose read() method will return
1914 successive changegroup chunks.
1923 successive changegroup chunks.
1915
1924
1916 This is much easier than the previous function as we can assume that
1925 This is much easier than the previous function as we can assume that
1917 the recipient already has any changenode we aren't sending.
1926 the recipient already has any changenode we aren't sending.
1918
1927
1919 nodes is the set of nodes to send"""
1928 nodes is the set of nodes to send"""
1920
1929
1921 self.hook('preoutgoing', throw=True, source=source)
1930 self.hook('preoutgoing', throw=True, source=source)
1922
1931
1923 cl = self.changelog
1932 cl = self.changelog
1924 revset = set([cl.rev(n) for n in nodes])
1933 revset = set([cl.rev(n) for n in nodes])
1925 self.changegroupinfo(nodes, source)
1934 self.changegroupinfo(nodes, source)
1926
1935
1927 def identity(x):
1936 def identity(x):
1928 return x
1937 return x
1929
1938
1930 def gennodelst(log):
1939 def gennodelst(log):
1931 for r in log:
1940 for r in log:
1932 if log.linkrev(r) in revset:
1941 if log.linkrev(r) in revset:
1933 yield log.node(r)
1942 yield log.node(r)
1934
1943
1935 def changed_file_collector(changedfileset):
1944 def changed_file_collector(changedfileset):
1936 def collect_changed_files(clnode):
1945 def collect_changed_files(clnode):
1937 c = cl.read(clnode)
1946 c = cl.read(clnode)
1938 changedfileset.update(c[3])
1947 changedfileset.update(c[3])
1939 return collect_changed_files
1948 return collect_changed_files
1940
1949
1941 def lookuprevlink_func(revlog):
1950 def lookuprevlink_func(revlog):
1942 def lookuprevlink(n):
1951 def lookuprevlink(n):
1943 return cl.node(revlog.linkrev(revlog.rev(n)))
1952 return cl.node(revlog.linkrev(revlog.rev(n)))
1944 return lookuprevlink
1953 return lookuprevlink
1945
1954
1946 def gengroup():
1955 def gengroup():
1947 '''yield a sequence of changegroup chunks (strings)'''
1956 '''yield a sequence of changegroup chunks (strings)'''
1948 # construct a list of all changed files
1957 # construct a list of all changed files
1949 changedfiles = set()
1958 changedfiles = set()
1950
1959
1951 for chnk in cl.group(nodes, identity,
1960 for chnk in cl.group(nodes, identity,
1952 changed_file_collector(changedfiles)):
1961 changed_file_collector(changedfiles)):
1953 yield chnk
1962 yield chnk
1954
1963
1955 mnfst = self.manifest
1964 mnfst = self.manifest
1956 nodeiter = gennodelst(mnfst)
1965 nodeiter = gennodelst(mnfst)
1957 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1966 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1958 yield chnk
1967 yield chnk
1959
1968
1960 for fname in sorted(changedfiles):
1969 for fname in sorted(changedfiles):
1961 filerevlog = self.file(fname)
1970 filerevlog = self.file(fname)
1962 if not len(filerevlog):
1971 if not len(filerevlog):
1963 raise util.Abort(_("empty or missing revlog for %s") % fname)
1972 raise util.Abort(_("empty or missing revlog for %s") % fname)
1964 nodeiter = gennodelst(filerevlog)
1973 nodeiter = gennodelst(filerevlog)
1965 nodeiter = list(nodeiter)
1974 nodeiter = list(nodeiter)
1966 if nodeiter:
1975 if nodeiter:
1967 yield changegroup.chunkheader(len(fname))
1976 yield changegroup.chunkheader(len(fname))
1968 yield fname
1977 yield fname
1969 lookup = lookuprevlink_func(filerevlog)
1978 lookup = lookuprevlink_func(filerevlog)
1970 for chnk in filerevlog.group(nodeiter, lookup):
1979 for chnk in filerevlog.group(nodeiter, lookup):
1971 yield chnk
1980 yield chnk
1972
1981
1973 yield changegroup.closechunk()
1982 yield changegroup.closechunk()
1974
1983
1975 if nodes:
1984 if nodes:
1976 self.hook('outgoing', node=hex(nodes[0]), source=source)
1985 self.hook('outgoing', node=hex(nodes[0]), source=source)
1977
1986
1978 return util.chunkbuffer(gengroup())
1987 return util.chunkbuffer(gengroup())
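Both generators fix the same overall layout: one changelog group, one manifest group, then for each changed file a filename chunk followed by its filelog group, and a final close chunk. A hedged reader sketch of that layout, assuming chunkiter/getchunk helpers shaped like the changegroup.chunkiter and changegroup.getchunk calls used by addchangegroup below:

    def walk_changegroup(stream, chunkiter, getchunk):
        # chunkiter is assumed to yield chunks until a close chunk;
        # getchunk is assumed to return one chunk, or '' at a close.
        for chunk in chunkiter(stream):
            pass                      # changelog group
        for chunk in chunkiter(stream):
            pass                      # manifest group
        while True:
            fname = getchunk(stream)  # filename, or '' when done
            if not fname:
                break
            for chunk in chunkiter(stream):
                pass                  # filelog group for fname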
1979
1988
1980 def addchangegroup(self, source, srctype, url, emptyok=False):
1989 def addchangegroup(self, source, srctype, url, emptyok=False):
1981 """add changegroup to repo.
1990 """add changegroup to repo.
1982
1991
1983 return values:
1992 return values:
1984 - nothing changed or no source: 0
1993 - nothing changed or no source: 0
1985 - more heads than before: 1+added heads (2..n)
1994 - more heads than before: 1+added heads (2..n)
1986 - fewer heads than before: -1-removed heads (-2..-n)
1995 - fewer heads than before: -1-removed heads (-2..-n)
1987 - number of heads stays the same: 1
1996 - number of heads stays the same: 1
1988 """
1997 """
1989 def csmap(x):
1998 def csmap(x):
1990 self.ui.debug("add changeset %s\n" % short(x))
1999 self.ui.debug("add changeset %s\n" % short(x))
1991 return len(cl)
2000 return len(cl)
1992
2001
1993 def revmap(x):
2002 def revmap(x):
1994 return cl.rev(x)
2003 return cl.rev(x)
1995
2004
1996 if not source:
2005 if not source:
1997 return 0
2006 return 0
1998
2007
1999 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2008 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2000
2009
2001 changesets = files = revisions = 0
2010 changesets = files = revisions = 0
2002
2011
2003 # write changelog data to temp files so concurrent readers will not see
2012 # write changelog data to temp files so concurrent readers will not see
2004 # an inconsistent view
2013 # an inconsistent view
2005 cl = self.changelog
2014 cl = self.changelog
2006 cl.delayupdate()
2015 cl.delayupdate()
2007 oldheads = len(cl.heads())
2016 oldheads = len(cl.heads())
2008
2017
2009 tr = self.transaction()
2018 tr = self.transaction()
2010 try:
2019 try:
2011 trp = weakref.proxy(tr)
2020 trp = weakref.proxy(tr)
2012 # pull off the changeset group
2021 # pull off the changeset group
2013 self.ui.status(_("adding changesets\n"))
2022 self.ui.status(_("adding changesets\n"))
2014 clstart = len(cl)
2023 clstart = len(cl)
2015 chunkiter = changegroup.chunkiter(source)
2024 chunkiter = changegroup.chunkiter(source)
2016 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
2025 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
2017 raise util.Abort(_("received changelog group is empty"))
2026 raise util.Abort(_("received changelog group is empty"))
2018 clend = len(cl)
2027 clend = len(cl)
2019 changesets = clend - clstart
2028 changesets = clend - clstart
2020
2029
2021 # pull off the manifest group
2030 # pull off the manifest group
2022 self.ui.status(_("adding manifests\n"))
2031 self.ui.status(_("adding manifests\n"))
2023 chunkiter = changegroup.chunkiter(source)
2032 chunkiter = changegroup.chunkiter(source)
2024 # no need to check for empty manifest group here:
2033 # no need to check for empty manifest group here:
2025 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2034 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2026 # no new manifest will be created and the manifest group will
2035 # no new manifest will be created and the manifest group will
2027 # be empty during the pull
2036 # be empty during the pull
2028 self.manifest.addgroup(chunkiter, revmap, trp)
2037 self.manifest.addgroup(chunkiter, revmap, trp)
2029
2038
2030 # process the files
2039 # process the files
2031 self.ui.status(_("adding file changes\n"))
2040 self.ui.status(_("adding file changes\n"))
2032 while 1:
2041 while 1:
2033 f = changegroup.getchunk(source)
2042 f = changegroup.getchunk(source)
2034 if not f:
2043 if not f:
2035 break
2044 break
2036 self.ui.debug("adding %s revisions\n" % f)
2045 self.ui.debug("adding %s revisions\n" % f)
2037 fl = self.file(f)
2046 fl = self.file(f)
2038 o = len(fl)
2047 o = len(fl)
2039 chunkiter = changegroup.chunkiter(source)
2048 chunkiter = changegroup.chunkiter(source)
2040 if fl.addgroup(chunkiter, revmap, trp) is None:
2049 if fl.addgroup(chunkiter, revmap, trp) is None:
2041 raise util.Abort(_("received file revlog group is empty"))
2050 raise util.Abort(_("received file revlog group is empty"))
2042 revisions += len(fl) - o
2051 revisions += len(fl) - o
2043 files += 1
2052 files += 1
2044
2053
2045 newheads = len(cl.heads())
2054 newheads = len(cl.heads())
2046 heads = ""
2055 heads = ""
2047 if oldheads and newheads != oldheads:
2056 if oldheads and newheads != oldheads:
2048 heads = _(" (%+d heads)") % (newheads - oldheads)
2057 heads = _(" (%+d heads)") % (newheads - oldheads)
2049
2058
2050 self.ui.status(_("added %d changesets"
2059 self.ui.status(_("added %d changesets"
2051 " with %d changes to %d files%s\n")
2060 " with %d changes to %d files%s\n")
2052 % (changesets, revisions, files, heads))
2061 % (changesets, revisions, files, heads))
2053
2062
2054 if changesets > 0:
2063 if changesets > 0:
2055 p = lambda: cl.writepending() and self.root or ""
2064 p = lambda: cl.writepending() and self.root or ""
2056 self.hook('pretxnchangegroup', throw=True,
2065 self.hook('pretxnchangegroup', throw=True,
2057 node=hex(cl.node(clstart)), source=srctype,
2066 node=hex(cl.node(clstart)), source=srctype,
2058 url=url, pending=p)
2067 url=url, pending=p)
2059
2068
2060 # make changelog see real files again
2069 # make changelog see real files again
2061 cl.finalize(trp)
2070 cl.finalize(trp)
2062
2071
2063 tr.close()
2072 tr.close()
2064 finally:
2073 finally:
2065 del tr
2074 del tr
2066
2075
2067 if changesets > 0:
2076 if changesets > 0:
2068 # forcefully update the on-disk branch cache
2077 # forcefully update the on-disk branch cache
2069 self.ui.debug("updating the branch cache\n")
2078 self.ui.debug("updating the branch cache\n")
2070 self.branchtags()
2079 self.branchtags()
2071 self.hook("changegroup", node=hex(cl.node(clstart)),
2080 self.hook("changegroup", node=hex(cl.node(clstart)),
2072 source=srctype, url=url)
2081 source=srctype, url=url)
2073
2082
2074 for i in xrange(clstart, clend):
2083 for i in xrange(clstart, clend):
2075 self.hook("incoming", node=hex(cl.node(i)),
2084 self.hook("incoming", node=hex(cl.node(i)),
2076 source=srctype, url=url)
2085 source=srctype, url=url)
2077
2086
2078 # never return 0 here:
2087 # never return 0 here:
2079 if newheads < oldheads:
2088 if newheads < oldheads:
2080 return newheads - oldheads - 1
2089 return newheads - oldheads - 1
2081 else:
2090 else:
2082 return newheads - oldheads + 1
2091 return newheads - oldheads + 1
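The docstring's encoding packs two facts into one integer: whether anything changed (zero or not) and how the head count moved (magnitude minus one, with the sign giving the direction). A small decoding helper, offered as an illustrative sketch of the convention rather than Mercurial API:

    def describe_result(ret):
        # 0: nothing changed; otherwise |ret| - 1 heads were added
        # (positive) or removed (negative).
        if ret == 0:
            return "nothing changed"
        if ret > 0:
            return "%d head(s) added" % (ret - 1)
        return "%d head(s) removed" % (-ret - 1)

    assert describe_result(1) == "0 head(s) added"    # head count unchanged
    assert describe_result(3) == "2 head(s) added"
    assert describe_result(-2) == "1 head(s) removed"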
2083
2092
2084
2093
2085 def stream_in(self, remote):
2094 def stream_in(self, remote):
2086 fp = remote.stream_out()
2095 fp = remote.stream_out()
2087 l = fp.readline()
2096 l = fp.readline()
2088 try:
2097 try:
2089 resp = int(l)
2098 resp = int(l)
2090 except ValueError:
2099 except ValueError:
2091 raise error.ResponseError(
2100 raise error.ResponseError(
2092 _('Unexpected response from remote server:'), l)
2101 _('Unexpected response from remote server:'), l)
2093 if resp == 1:
2102 if resp == 1:
2094 raise util.Abort(_('operation forbidden by server'))
2103 raise util.Abort(_('operation forbidden by server'))
2095 elif resp == 2:
2104 elif resp == 2:
2096 raise util.Abort(_('locking the remote repository failed'))
2105 raise util.Abort(_('locking the remote repository failed'))
2097 elif resp != 0:
2106 elif resp != 0:
2098 raise util.Abort(_('the server sent an unknown error code'))
2107 raise util.Abort(_('the server sent an unknown error code'))
2099 self.ui.status(_('streaming all changes\n'))
2108 self.ui.status(_('streaming all changes\n'))
2100 l = fp.readline()
2109 l = fp.readline()
2101 try:
2110 try:
2102 total_files, total_bytes = map(int, l.split(' ', 1))
2111 total_files, total_bytes = map(int, l.split(' ', 1))
2103 except (ValueError, TypeError):
2112 except (ValueError, TypeError):
2104 raise error.ResponseError(
2113 raise error.ResponseError(
2105 _('Unexpected response from remote server:'), l)
2114 _('Unexpected response from remote server:'), l)
2106 self.ui.status(_('%d files to transfer, %s of data\n') %
2115 self.ui.status(_('%d files to transfer, %s of data\n') %
2107 (total_files, util.bytecount(total_bytes)))
2116 (total_files, util.bytecount(total_bytes)))
2108 start = time.time()
2117 start = time.time()
2109 for i in xrange(total_files):
2118 for i in xrange(total_files):
2110 # XXX doesn't support '\n' or '\r' in filenames
2119 # XXX doesn't support '\n' or '\r' in filenames
2111 l = fp.readline()
2120 l = fp.readline()
2112 try:
2121 try:
2113 name, size = l.split('\0', 1)
2122 name, size = l.split('\0', 1)
2114 size = int(size)
2123 size = int(size)
2115 except (ValueError, TypeError):
2124 except (ValueError, TypeError):
2116 raise error.ResponseError(
2125 raise error.ResponseError(
2117 _('Unexpected response from remote server:'), l)
2126 _('Unexpected response from remote server:'), l)
2118 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2127 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2119 # for backwards compat, name was partially encoded
2128 # for backwards compat, name was partially encoded
2120 ofp = self.sopener(store.decodedir(name), 'w')
2129 ofp = self.sopener(store.decodedir(name), 'w')
2121 for chunk in util.filechunkiter(fp, limit=size):
2130 for chunk in util.filechunkiter(fp, limit=size):
2122 ofp.write(chunk)
2131 ofp.write(chunk)
2123 ofp.close()
2132 ofp.close()
2124 elapsed = time.time() - start
2133 elapsed = time.time() - start
2125 if elapsed <= 0:
2134 if elapsed <= 0:
2126 elapsed = 0.001
2135 elapsed = 0.001
2127 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2136 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2128 (util.bytecount(total_bytes), elapsed,
2137 (util.bytecount(total_bytes), elapsed,
2129 util.bytecount(total_bytes / elapsed)))
2138 util.bytecount(total_bytes / elapsed)))
2130 self.invalidate()
2139 self.invalidate()
2131 return len(self.heads()) + 1
2140 return len(self.heads()) + 1
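stream_in parses a small line-oriented protocol: a numeric status line, then 'total_files total_bytes', then per file a 'name\0size' header followed by exactly size bytes of raw revlog data. A hypothetical encoder for that framing (not Mercurial's actual server code) makes the layout explicit:

    def encode_stream(files):
        # files: list of (name, data) pairs, mirroring what the
        # stream_in parser above expects on the wire.
        yield '0\n'                                  # status 0 = OK
        total = sum(len(d) for _, d in files)
        yield '%d %d\n' % (len(files), total)        # file/byte counts
        for name, data in files:
            yield '%s\0%d\n' % (name, len(data))     # per-file header
            yield data                               # exactly len(data) bytes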
2132
2141
2133 def clone(self, remote, heads=[], stream=False):
2142 def clone(self, remote, heads=[], stream=False):
2134 '''clone remote repository.
2143 '''clone remote repository.
2135
2144
2136 keyword arguments:
2145 keyword arguments:
2137 heads: list of revs to clone (forces use of pull)
2146 heads: list of revs to clone (forces use of pull)
2138 stream: use streaming clone if possible'''
2147 stream: use streaming clone if possible'''
2139
2148
2140 # now, all clients that can request uncompressed clones can
2149 # now, all clients that can request uncompressed clones can
2141 # read repo formats supported by all servers that can serve
2150 # read repo formats supported by all servers that can serve
2142 # them.
2151 # them.
2143
2152
2144 # if revlog format changes, client will have to check version
2153 # if revlog format changes, client will have to check version
2145 # and format flags on "stream" capability, and use
2154 # and format flags on "stream" capability, and use
2146 # uncompressed only if compatible.
2155 # uncompressed only if compatible.
2147
2156
2148 if stream and not heads and remote.capable('stream'):
2157 if stream and not heads and remote.capable('stream'):
2149 return self.stream_in(remote)
2158 return self.stream_in(remote)
2150 return self.pull(remote, heads)
2159 return self.pull(remote, heads)
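The guard above prefers the streaming path only when no specific heads were requested and the server advertises the 'stream' capability; otherwise it degrades to a normal pull. An illustrative restatement with a stub remote (StubRemote and clone_strategy are made-up names for the sketch):

    class StubRemote(object):
        def __init__(self, caps):
            self.caps = set(caps)
        def capable(self, name):
            return name in self.caps

    def clone_strategy(remote, heads, stream):
        # mirrors the condition in localrepository.clone above
        if stream and not heads and remote.capable('stream'):
            return 'stream_in'
        return 'pull'

    assert clone_strategy(StubRemote(['stream']), [], True) == 'stream_in'
    assert clone_strategy(StubRemote([]), [], True) == 'pull'
    assert clone_strategy(StubRemote(['stream']), ['h1'], True) == 'pull'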
2151
2160
2152 # used to avoid circular references so destructors work
2161 # used to avoid circular references so destructors work
2153 def aftertrans(files):
2162 def aftertrans(files):
2154 renamefiles = [tuple(t) for t in files]
2163 renamefiles = [tuple(t) for t in files]
2155 def a():
2164 def a():
2156 for src, dest in renamefiles:
2165 for src, dest in renamefiles:
2157 util.rename(src, dest)
2166 util.rename(src, dest)
2158 return a
2167 return a
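aftertrans returns a standalone closure rather than a bound method so that whatever holds the callback does not also keep a repository object alive, per the circular-reference comment above. A hedged usage sketch, with journal/undo file names chosen for illustration:

    # Schedule journal files to become undo files once the
    # transaction completes (illustrative paths).
    onclose = aftertrans([('journal', 'undo'),
                          ('journal.dirstate', 'undo.dirstate')])
    # ... after the transaction closes successfully:
    onclose()   # runs util.rename('journal', 'undo'), etc.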
2159
2168
2160 def instance(ui, path, create):
2169 def instance(ui, path, create):
2161 return localrepository(ui, util.drop_scheme('file', path), create)
2170 return localrepository(ui, util.drop_scheme('file', path), create)
2162
2171
2163 def islocal(path):
2172 def islocal(path):
2164 return True
2173 return True
@@ -1,71 +1,73 @@
1 % init repo1
1 % init repo1
2
2
3 % add a; ci
3 % add a; ci
4 adding a
4 adding a
5
5
6 % cat .hg/store/fncache
6 % cat .hg/store/fncache
7 data/a.i
7 data/a.i
8
8
9 % add a.i/b; ci
9 % add a.i/b; ci
10 adding a.i/b
10 adding a.i/b
11
11
12 % cat .hg/store/fncache
12 % cat .hg/store/fncache
13 data/a.i
13 data/a.i
14 data/a.i.hg/b.i
14 data/a.i.hg/b.i
15
15
16 % add a.i.hg/c; ci
16 % add a.i.hg/c; ci
17 adding a.i.hg/c
17 adding a.i.hg/c
18
18
19 % cat .hg/store/fncache
19 % cat .hg/store/fncache
20 data/a.i
20 data/a.i
21 data/a.i.hg/b.i
21 data/a.i.hg/b.i
22 data/a.i.hg.hg/c.i
22 data/a.i.hg.hg/c.i
23
23
24 % hg verify
24 % hg verify
25 checking changesets
25 checking changesets
26 checking manifests
26 checking manifests
27 crosschecking files in changesets and manifests
27 crosschecking files in changesets and manifests
28 checking files
28 checking files
29 3 files, 3 changesets, 3 total revisions
29 3 files, 3 changesets, 3 total revisions
30
30
31 % rm .hg/store/fncache
31 % rm .hg/store/fncache
32
32
33 % hg verify
33 % hg verify
34 checking changesets
34 checking changesets
35 checking manifests
35 checking manifests
36 crosschecking files in changesets and manifests
36 crosschecking files in changesets and manifests
37 checking files
37 checking files
38 data/a.i@0: missing revlog!
38 data/a.i@0: missing revlog!
39 data/a.i.hg/c.i@2: missing revlog!
39 data/a.i.hg/c.i@2: missing revlog!
40 data/a.i/b.i@1: missing revlog!
40 data/a.i/b.i@1: missing revlog!
41 3 files, 3 changesets, 3 total revisions
41 3 files, 3 changesets, 3 total revisions
42 3 integrity errors encountered!
42 3 integrity errors encountered!
43 (first damaged changeset appears to be 0)
43 (first damaged changeset appears to be 0)
44 % non store repo
44 % non store repo
45 adding tst.d/foo
45 adding tst.d/foo
46 .hg
46 .hg
47 .hg/00changelog.i
47 .hg/00changelog.i
48 .hg/00manifest.i
48 .hg/00manifest.i
49 .hg/data
49 .hg/data
50 .hg/data/tst.d.hg
50 .hg/data/tst.d.hg
51 .hg/data/tst.d.hg/foo.i
51 .hg/data/tst.d.hg/foo.i
52 .hg/dirstate
52 .hg/dirstate
53 .hg/last-message.txt
53 .hg/requires
54 .hg/requires
54 .hg/undo
55 .hg/undo
55 .hg/undo.branch
56 .hg/undo.branch
56 .hg/undo.dirstate
57 .hg/undo.dirstate
57 % non fncache repo
58 % non fncache repo
58 adding tst.d/Foo
59 adding tst.d/Foo
59 .hg
60 .hg
60 .hg/00changelog.i
61 .hg/00changelog.i
61 .hg/dirstate
62 .hg/dirstate
63 .hg/last-message.txt
62 .hg/requires
64 .hg/requires
63 .hg/store
65 .hg/store
64 .hg/store/00changelog.i
66 .hg/store/00changelog.i
65 .hg/store/00manifest.i
67 .hg/store/00manifest.i
66 .hg/store/data
68 .hg/store/data
67 .hg/store/data/tst.d.hg
69 .hg/store/data/tst.d.hg
68 .hg/store/data/tst.d.hg/_foo.i
70 .hg/store/data/tst.d.hg/_foo.i
69 .hg/store/undo
71 .hg/store/undo
70 .hg/undo.branch
72 .hg/undo.branch
71 .hg/undo.dirstate
73 .hg/undo.dirstate
@@ -1,56 +1,57 @@
1 % before commit
1 % before commit
2 % store can be written by the group, other files cannot
2 % store can be written by the group, other files cannot
3 % store is setgid
3 % store is setgid
4 00700 ./.hg/
4 00700 ./.hg/
5 00600 ./.hg/00changelog.i
5 00600 ./.hg/00changelog.i
6 00600 ./.hg/requires
6 00600 ./.hg/requires
7 00770 ./.hg/store/
7 00770 ./.hg/store/
8
8
9 % after commit
9 % after commit
10 % working dir files can only be written by the owner
10 % working dir files can only be written by the owner
11 % files created in .hg can be written by the group
11 % files created in .hg can be written by the group
12 % (in particular, store/**, dirstate, branch cache file, undo files)
12 % (in particular, store/**, dirstate, branch cache file, undo files)
13 % new directories are setgid
13 % new directories are setgid
14 00700 ./.hg/
14 00700 ./.hg/
15 00600 ./.hg/00changelog.i
15 00600 ./.hg/00changelog.i
16 00660 ./.hg/dirstate
16 00660 ./.hg/dirstate
17 00660 ./.hg/last-message.txt
17 00600 ./.hg/requires
18 00600 ./.hg/requires
18 00770 ./.hg/store/
19 00770 ./.hg/store/
19 00660 ./.hg/store/00changelog.i
20 00660 ./.hg/store/00changelog.i
20 00660 ./.hg/store/00manifest.i
21 00660 ./.hg/store/00manifest.i
21 00770 ./.hg/store/data/
22 00770 ./.hg/store/data/
22 00770 ./.hg/store/data/dir/
23 00770 ./.hg/store/data/dir/
23 00660 ./.hg/store/data/dir/bar.i
24 00660 ./.hg/store/data/dir/bar.i
24 00660 ./.hg/store/data/foo.i
25 00660 ./.hg/store/data/foo.i
25 00660 ./.hg/store/fncache
26 00660 ./.hg/store/fncache
26 00660 ./.hg/store/undo
27 00660 ./.hg/store/undo
27 00660 ./.hg/undo.branch
28 00660 ./.hg/undo.branch
28 00660 ./.hg/undo.dirstate
29 00660 ./.hg/undo.dirstate
29 00700 ./dir/
30 00700 ./dir/
30 00600 ./dir/bar
31 00600 ./dir/bar
31 00600 ./foo
32 00600 ./foo
32
33
33 % before push
34 % before push
34 % group can write everything
35 % group can write everything
35 00770 ../push/.hg/
36 00770 ../push/.hg/
36 00660 ../push/.hg/00changelog.i
37 00660 ../push/.hg/00changelog.i
37 00660 ../push/.hg/requires
38 00660 ../push/.hg/requires
38 00770 ../push/.hg/store/
39 00770 ../push/.hg/store/
39
40
40 % after push
41 % after push
41 % group can still write everything
42 % group can still write everything
42 00770 ../push/.hg/
43 00770 ../push/.hg/
43 00660 ../push/.hg/00changelog.i
44 00660 ../push/.hg/00changelog.i
44 00660 ../push/.hg/branchheads.cache
45 00660 ../push/.hg/branchheads.cache
45 00660 ../push/.hg/requires
46 00660 ../push/.hg/requires
46 00770 ../push/.hg/store/
47 00770 ../push/.hg/store/
47 00660 ../push/.hg/store/00changelog.i
48 00660 ../push/.hg/store/00changelog.i
48 00660 ../push/.hg/store/00manifest.i
49 00660 ../push/.hg/store/00manifest.i
49 00770 ../push/.hg/store/data/
50 00770 ../push/.hg/store/data/
50 00770 ../push/.hg/store/data/dir/
51 00770 ../push/.hg/store/data/dir/
51 00660 ../push/.hg/store/data/dir/bar.i
52 00660 ../push/.hg/store/data/dir/bar.i
52 00660 ../push/.hg/store/data/foo.i
53 00660 ../push/.hg/store/data/foo.i
53 00660 ../push/.hg/store/fncache
54 00660 ../push/.hg/store/fncache
54 00660 ../push/.hg/store/undo
55 00660 ../push/.hg/store/undo
55 00660 ../push/.hg/undo.branch
56 00660 ../push/.hg/undo.branch
56 00660 ../push/.hg/undo.dirstate
57 00660 ../push/.hg/undo.dirstate
@@ -1,28 +1,39 @@
1 #!/bin/sh
1 #!/bin/sh
2
2
3 mkdir t
3 mkdir t
4 cd t
4 cd t
5 hg init
5 hg init
6 echo a > a
6 echo a > a
7 hg add a
7 hg add a
8 hg commit -m "test" -d "1000000 0"
8 hg commit -m "test" -d "1000000 0"
9 hg verify
9 hg verify
10 hg parents
10 hg parents
11 hg status
11 hg status
12 hg rollback
12 hg rollback
13 hg verify
13 hg verify
14 hg parents
14 hg parents
15 hg status
15 hg status
16
16
17 echo % Test issue 902
17 echo % Test issue 902
18 hg commit -m "test"
18 hg commit -m "test2"
19 hg branch test
19 hg branch test
20 hg rollback
20 hg rollback
21 hg branch
21 hg branch
22
22
23 echo '% Test issue 1635 (commit message saved)'
24 echo '.hg/last-message.txt:'
25 cat .hg/last-message.txt
26
23 echo % Test rollback of hg before issue 902 was fixed
27 echo % Test rollback of hg before issue 902 was fixed
24 hg commit -m "test"
28 hg commit -m "test3"
25 hg branch test
29 hg branch test
26 rm .hg/undo.branch
30 rm .hg/undo.branch
27 hg rollback
31 hg rollback
28 hg branch
32 hg branch
33
34 echo '% rollback by pretxncommit saves commit message (issue 1635)'
35 echo a >> a
36 hg --config hooks.pretxncommit=/bin/false commit -m"precious commit message"
37
38 echo '.hg/last-message.txt:'
39 cat .hg/last-message.txt
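The test drives the fix this commit makes: the commit message is written to .hg/last-message.txt before the pretxncommit hook runs, so an abort-and-rollback in the hook cannot destroy it. A minimal sketch of that ordering, assuming the repository's opener and transaction interfaces shown earlier (the helper name is hypothetical):

    def commit_saving_message(repo, text):
        # Persist the message first; rolling back the transaction
        # removes revlog data but leaves this plain file alone.
        repo.opener('last-message.txt', 'w').write(text)
        tr = repo.transaction()
        try:
            # ... write changelog/manifest/filelog entries ...
            repo.hook('pretxncommit', throw=True)  # may abort here
            tr.close()
        finally:
            del tr   # an unclosed transaction aborts on destruction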
@@ -1,27 +1,36 @@
1 checking changesets
1 checking changesets
2 checking manifests
2 checking manifests
3 crosschecking files in changesets and manifests
3 crosschecking files in changesets and manifests
4 checking files
4 checking files
5 1 files, 1 changesets, 1 total revisions
5 1 files, 1 changesets, 1 total revisions
6 changeset: 0:0acdaf898367
6 changeset: 0:0acdaf898367
7 tag: tip
7 tag: tip
8 user: test
8 user: test
9 date: Mon Jan 12 13:46:40 1970 +0000
9 date: Mon Jan 12 13:46:40 1970 +0000
10 summary: test
10 summary: test
11
11
12 rolling back last transaction
12 rolling back last transaction
13 checking changesets
13 checking changesets
14 checking manifests
14 checking manifests
15 crosschecking files in changesets and manifests
15 crosschecking files in changesets and manifests
16 checking files
16 checking files
17 0 files, 0 changesets, 0 total revisions
17 0 files, 0 changesets, 0 total revisions
18 A a
18 A a
19 % Test issue 902
19 % Test issue 902
20 marked working directory as branch test
20 marked working directory as branch test
21 rolling back last transaction
21 rolling back last transaction
22 default
22 default
23 % Test issue 1635 (commit message saved)
24 .hg/last-message.txt:
25 test2
23 % Test rollback of hg before issue 902 was fixed
26 % Test rollback of hg before issue 902 was fixed
24 marked working directory as branch test
27 marked working directory as branch test
25 rolling back last transaction
28 rolling back last transaction
26 Named branch could not be reset, current branch still is: test
29 Named branch could not be reset, current branch still is: test
27 test
30 test
31 % rollback by pretxncommit saves commit message (issue 1635)
32 transaction abort!
33 rollback completed
34 abort: pretxncommit hook exited with status 1
35 .hg/last-message.txt:
36 precious commit message