Factor tags module out of localrepo (issue548)...
Greg Ward
r9149:abb7d4d4 default
@@ -0,0 +1,122 b''
+# tags.py - read tag info from local repository
+#
+# Copyright 2009 Matt Mackall <mpm@selenic.com>
+# Copyright 2009 Greg Ward <greg@gerg.ca>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+# Currently this module only deals with reading tags. Soon it will grow
+# support for caching tag info. Eventually, it could take care of
+# updating (adding/removing/moving) tags too.
+
+from node import bin, hex
+from i18n import _
+import encoding
+import error
+
+def findglobaltags(ui, repo, alltags, tagtypes):
+    '''Find global tags in repo by reading .hgtags from every head that
+    has a distinct version of it. Updates the dicts alltags, tagtypes
+    in place: alltags maps tag name to (node, hist) pair (see _readtags()
+    below), and tagtypes maps tag name to tag type ('global' in this
+    case).'''
+
+    seen = set()
+    fctx = None
+    ctxs = [] # list of filectx
+    for node in repo.heads():
+        try:
+            fnode = repo[node].filenode('.hgtags')
+        except error.LookupError:
+            continue
+        if fnode not in seen:
+            seen.add(fnode)
+            if not fctx:
+                fctx = repo.filectx('.hgtags', fileid=fnode)
+            else:
+                fctx = fctx.filectx(fnode)
+            ctxs.append(fctx)
+
+    # read the tags file from each head, ending with the tip
+    for fctx in reversed(ctxs):
+        filetags = _readtags(
+            ui, repo, fctx.data().splitlines(), fctx)
+        _updatetags(filetags, "global", alltags, tagtypes)
+
+def readlocaltags(ui, repo, alltags, tagtypes):
+    '''Read local tags in repo. Update alltags and tagtypes.'''
+    try:
+        data = encoding.fromlocal(repo.opener("localtags").read())
+        # localtags are stored in the local character set
+        # while the internal tag table is stored in UTF-8
+        filetags = _readtags(
+            ui, repo, data.splitlines(), "localtags")
+        _updatetags(filetags, "local", alltags, tagtypes)
+    except IOError:
+        pass
+
+def _readtags(ui, repo, lines, fn):
+    '''Read tag definitions from a file (or any source of lines).
+    Return a mapping from tag name to (node, hist): node is the node id
+    from the last line read for that name, and hist is the list of node
+    ids previously associated with it (in file order). All node ids are
+    binary, not hex.'''
+
+    filetags = {} # map tag name to (node, hist)
+    count = 0
+
+    def warn(msg):
+        ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
+
+    for line in lines:
+        count += 1
+        if not line:
+            continue
+        try:
+            (nodehex, name) = line.split(" ", 1)
+        except ValueError:
+            warn(_("cannot parse entry"))
+            continue
+        name = encoding.tolocal(name.strip()) # stored in UTF-8
+        try:
+            nodebin = bin(nodehex)
+        except TypeError:
+            warn(_("node '%s' is not well formed") % nodehex)
+            continue
+        if nodebin not in repo.changelog.nodemap:
+            # silently ignore as pull -r might cause this
+            continue
+
+        # update filetags
+        hist = []
+        if name in filetags:
+            n, hist = filetags[name]
+            hist.append(n)
+        filetags[name] = (nodebin, hist)
+    return filetags
+
+def _updatetags(filetags, tagtype, alltags, tagtypes):
+    '''Incorporate the tag info read from one file into the two
+    dictionaries, alltags and tagtypes, that contain all tag
+    info (global across all heads plus local).'''
+
+    for name, nodehist in filetags.iteritems():
+        if name not in alltags:
+            alltags[name] = nodehist
+            tagtypes[name] = tagtype
+            continue
+
+        # we prefer alltags[name] if:
+        #  it supercedes us OR
+        #  mutual supercedes and it has a higher rank
+        # otherwise we win because we're tip-most
+        anode, ahist = nodehist
+        bnode, bhist = alltags[name]
+        if (bnode != anode and anode in bhist and
+            (bnode not in ahist or len(bhist) > len(ahist))):
+            anode = bnode
+            ahist.extend([n for n in bhist if n not in ahist])
+        alltags[name] = anode, ahist
+        tagtypes[name] = tagtype
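
The two public entry points, findglobaltags() and readlocaltags(), are meant to be called back to back against the same pair of dicts; the localrepo.py hunk below does exactly that in _findtags(). As a minimal sketch of the calling convention (not part of the commit; written the way an out-of-tree caller would import the modules, with ui and repo standing for the usual Mercurial ui and localrepository objects):

    from mercurial.node import nullid
    from mercurial import tags as tags_

    def findtags(ui, repo):
        alltags = {}   # tag name -> (node, hist), node ids are binary
        tagtypes = {}  # tag name -> 'global' or 'local'
        tags_.findglobaltags(ui, repo, alltags, tagtypes)  # .hgtags on all heads
        tags_.readlocaltags(ui, repo, alltags, tagtypes)   # .hg/localtags
        # a tag whose winning node is nullid has been deleted, so drop it
        tags = {}
        for name, (node, hist) in alltags.iteritems():
            if node != nullid:
                tags[name] = node
        return tags, tagtypes
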
@@ -1,2214 +1,2124 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2, incorporated herein by reference.
6 # GNU General Public License version 2, incorporated herein by reference.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup, subrepo
10 import repo, changegroup, subrepo
11 import changelog, dirstate, filelog, manifest, context
11 import changelog, dirstate, filelog, manifest, context
12 import lock, transaction, store, encoding
12 import lock, transaction, store, encoding
13 import util, extensions, hook, error
13 import util, extensions, hook, error
14 import match as match_
14 import match as match_
15 import merge as merge_
15 import merge as merge_
16 import tags as tags_
16 from lock import release
17 from lock import release
17 import weakref, stat, errno, os, time, inspect
18 import weakref, stat, errno, os, time, inspect
18 propertycache = util.propertycache
19 propertycache = util.propertycache
19
20
20 class localrepository(repo.repository):
21 class localrepository(repo.repository):
21 capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
22 capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
22 supported = set('revlogv1 store fncache shared'.split())
23 supported = set('revlogv1 store fncache shared'.split())
23
24
24 def __init__(self, baseui, path=None, create=0):
25 def __init__(self, baseui, path=None, create=0):
25 repo.repository.__init__(self)
26 repo.repository.__init__(self)
26 self.root = os.path.realpath(path)
27 self.root = os.path.realpath(path)
27 self.path = os.path.join(self.root, ".hg")
28 self.path = os.path.join(self.root, ".hg")
28 self.origroot = path
29 self.origroot = path
29 self.opener = util.opener(self.path)
30 self.opener = util.opener(self.path)
30 self.wopener = util.opener(self.root)
31 self.wopener = util.opener(self.root)
31 self.baseui = baseui
32 self.baseui = baseui
32 self.ui = baseui.copy()
33 self.ui = baseui.copy()
33
34
34 try:
35 try:
35 self.ui.readconfig(self.join("hgrc"), self.root)
36 self.ui.readconfig(self.join("hgrc"), self.root)
36 extensions.loadall(self.ui)
37 extensions.loadall(self.ui)
37 except IOError:
38 except IOError:
38 pass
39 pass
39
40
40 if not os.path.isdir(self.path):
41 if not os.path.isdir(self.path):
41 if create:
42 if create:
42 if not os.path.exists(path):
43 if not os.path.exists(path):
43 os.mkdir(path)
44 os.mkdir(path)
44 os.mkdir(self.path)
45 os.mkdir(self.path)
45 requirements = ["revlogv1"]
46 requirements = ["revlogv1"]
46 if self.ui.configbool('format', 'usestore', True):
47 if self.ui.configbool('format', 'usestore', True):
47 os.mkdir(os.path.join(self.path, "store"))
48 os.mkdir(os.path.join(self.path, "store"))
48 requirements.append("store")
49 requirements.append("store")
49 if self.ui.configbool('format', 'usefncache', True):
50 if self.ui.configbool('format', 'usefncache', True):
50 requirements.append("fncache")
51 requirements.append("fncache")
51 # create an invalid changelog
52 # create an invalid changelog
52 self.opener("00changelog.i", "a").write(
53 self.opener("00changelog.i", "a").write(
53 '\0\0\0\2' # represents revlogv2
54 '\0\0\0\2' # represents revlogv2
54 ' dummy changelog to prevent using the old repo layout'
55 ' dummy changelog to prevent using the old repo layout'
55 )
56 )
56 reqfile = self.opener("requires", "w")
57 reqfile = self.opener("requires", "w")
57 for r in requirements:
58 for r in requirements:
58 reqfile.write("%s\n" % r)
59 reqfile.write("%s\n" % r)
59 reqfile.close()
60 reqfile.close()
60 else:
61 else:
61 raise error.RepoError(_("repository %s not found") % path)
62 raise error.RepoError(_("repository %s not found") % path)
62 elif create:
63 elif create:
63 raise error.RepoError(_("repository %s already exists") % path)
64 raise error.RepoError(_("repository %s already exists") % path)
64 else:
65 else:
65 # find requirements
66 # find requirements
66 requirements = set()
67 requirements = set()
67 try:
68 try:
68 requirements = set(self.opener("requires").read().splitlines())
69 requirements = set(self.opener("requires").read().splitlines())
69 except IOError, inst:
70 except IOError, inst:
70 if inst.errno != errno.ENOENT:
71 if inst.errno != errno.ENOENT:
71 raise
72 raise
72 for r in requirements - self.supported:
73 for r in requirements - self.supported:
73 raise error.RepoError(_("requirement '%s' not supported") % r)
74 raise error.RepoError(_("requirement '%s' not supported") % r)
74
75
75 self.sharedpath = self.path
76 self.sharedpath = self.path
76 try:
77 try:
77 s = os.path.realpath(self.opener("sharedpath").read())
78 s = os.path.realpath(self.opener("sharedpath").read())
78 if not os.path.exists(s):
79 if not os.path.exists(s):
79 raise error.RepoError(
80 raise error.RepoError(
80 _('.hg/sharedpath points to nonexistent directory %s') % s)
81 _('.hg/sharedpath points to nonexistent directory %s') % s)
81 self.sharedpath = s
82 self.sharedpath = s
82 except IOError, inst:
83 except IOError, inst:
83 if inst.errno != errno.ENOENT:
84 if inst.errno != errno.ENOENT:
84 raise
85 raise
85
86
86 self.store = store.store(requirements, self.sharedpath, util.opener)
87 self.store = store.store(requirements, self.sharedpath, util.opener)
87 self.spath = self.store.path
88 self.spath = self.store.path
88 self.sopener = self.store.opener
89 self.sopener = self.store.opener
89 self.sjoin = self.store.join
90 self.sjoin = self.store.join
90 self.opener.createmode = self.store.createmode
91 self.opener.createmode = self.store.createmode
91
92
92 # These two define the set of tags for this repository. _tags
93 # These two define the set of tags for this repository. _tags
93 # maps tag name to node; _tagtypes maps tag name to 'global' or
94 # maps tag name to node; _tagtypes maps tag name to 'global' or
94 # 'local'. (Global tags are defined by .hgtags across all
95 # 'local'. (Global tags are defined by .hgtags across all
95 # heads, and local tags are defined in .hg/localtags.) They
96 # heads, and local tags are defined in .hg/localtags.) They
96 # constitute the in-memory cache of tags.
97 # constitute the in-memory cache of tags.
97 self._tags = None
98 self._tags = None
98 self._tagtypes = None
99 self._tagtypes = None
99
100
100 self.branchcache = None
101 self.branchcache = None
101 self._ubranchcache = None # UTF-8 version of branchcache
102 self._ubranchcache = None # UTF-8 version of branchcache
102 self._branchcachetip = None
103 self._branchcachetip = None
103 self.nodetagscache = None
104 self.nodetagscache = None
104 self.filterpats = {}
105 self.filterpats = {}
105 self._datafilters = {}
106 self._datafilters = {}
106 self._transref = self._lockref = self._wlockref = None
107 self._transref = self._lockref = self._wlockref = None
107
108
108 @propertycache
109 @propertycache
109 def changelog(self):
110 def changelog(self):
110 c = changelog.changelog(self.sopener)
111 c = changelog.changelog(self.sopener)
111 if 'HG_PENDING' in os.environ:
112 if 'HG_PENDING' in os.environ:
112 p = os.environ['HG_PENDING']
113 p = os.environ['HG_PENDING']
113 if p.startswith(self.root):
114 if p.startswith(self.root):
114 c.readpending('00changelog.i.a')
115 c.readpending('00changelog.i.a')
115 self.sopener.defversion = c.version
116 self.sopener.defversion = c.version
116 return c
117 return c
117
118
118 @propertycache
119 @propertycache
119 def manifest(self):
120 def manifest(self):
120 return manifest.manifest(self.sopener)
121 return manifest.manifest(self.sopener)
121
122
122 @propertycache
123 @propertycache
123 def dirstate(self):
124 def dirstate(self):
124 return dirstate.dirstate(self.opener, self.ui, self.root)
125 return dirstate.dirstate(self.opener, self.ui, self.root)
125
126
126 def __getitem__(self, changeid):
127 def __getitem__(self, changeid):
127 if changeid is None:
128 if changeid is None:
128 return context.workingctx(self)
129 return context.workingctx(self)
129 return context.changectx(self, changeid)
130 return context.changectx(self, changeid)
130
131
131 def __nonzero__(self):
132 def __nonzero__(self):
132 return True
133 return True
133
134
134 def __len__(self):
135 def __len__(self):
135 return len(self.changelog)
136 return len(self.changelog)
136
137
137 def __iter__(self):
138 def __iter__(self):
138 for i in xrange(len(self)):
139 for i in xrange(len(self)):
139 yield i
140 yield i
140
141
141 def url(self):
142 def url(self):
142 return 'file:' + self.root
143 return 'file:' + self.root
143
144
144 def hook(self, name, throw=False, **args):
145 def hook(self, name, throw=False, **args):
145 return hook.hook(self.ui, self, name, throw, **args)
146 return hook.hook(self.ui, self, name, throw, **args)
146
147
147 tag_disallowed = ':\r\n'
148 tag_disallowed = ':\r\n'
148
149
149 def _tag(self, names, node, message, local, user, date, extra={}):
150 def _tag(self, names, node, message, local, user, date, extra={}):
150 if isinstance(names, str):
151 if isinstance(names, str):
151 allchars = names
152 allchars = names
152 names = (names,)
153 names = (names,)
153 else:
154 else:
154 allchars = ''.join(names)
155 allchars = ''.join(names)
155 for c in self.tag_disallowed:
156 for c in self.tag_disallowed:
156 if c in allchars:
157 if c in allchars:
157 raise util.Abort(_('%r cannot be used in a tag name') % c)
158 raise util.Abort(_('%r cannot be used in a tag name') % c)
158
159
159 for name in names:
160 for name in names:
160 self.hook('pretag', throw=True, node=hex(node), tag=name,
161 self.hook('pretag', throw=True, node=hex(node), tag=name,
161 local=local)
162 local=local)
162
163
163 def writetags(fp, names, munge, prevtags):
164 def writetags(fp, names, munge, prevtags):
164 fp.seek(0, 2)
165 fp.seek(0, 2)
165 if prevtags and prevtags[-1] != '\n':
166 if prevtags and prevtags[-1] != '\n':
166 fp.write('\n')
167 fp.write('\n')
167 for name in names:
168 for name in names:
168 m = munge and munge(name) or name
169 m = munge and munge(name) or name
169 if self._tagtypes and name in self._tagtypes:
170 if self._tagtypes and name in self._tagtypes:
170 old = self._tags.get(name, nullid)
171 old = self._tags.get(name, nullid)
171 fp.write('%s %s\n' % (hex(old), m))
172 fp.write('%s %s\n' % (hex(old), m))
172 fp.write('%s %s\n' % (hex(node), m))
173 fp.write('%s %s\n' % (hex(node), m))
173 fp.close()
174 fp.close()
174
175
175 prevtags = ''
176 prevtags = ''
176 if local:
177 if local:
177 try:
178 try:
178 fp = self.opener('localtags', 'r+')
179 fp = self.opener('localtags', 'r+')
179 except IOError:
180 except IOError:
180 fp = self.opener('localtags', 'a')
181 fp = self.opener('localtags', 'a')
181 else:
182 else:
182 prevtags = fp.read()
183 prevtags = fp.read()
183
184
184 # local tags are stored in the current charset
185 # local tags are stored in the current charset
185 writetags(fp, names, None, prevtags)
186 writetags(fp, names, None, prevtags)
186 for name in names:
187 for name in names:
187 self.hook('tag', node=hex(node), tag=name, local=local)
188 self.hook('tag', node=hex(node), tag=name, local=local)
188 return
189 return
189
190
190 try:
191 try:
191 fp = self.wfile('.hgtags', 'rb+')
192 fp = self.wfile('.hgtags', 'rb+')
192 except IOError:
193 except IOError:
193 fp = self.wfile('.hgtags', 'ab')
194 fp = self.wfile('.hgtags', 'ab')
194 else:
195 else:
195 prevtags = fp.read()
196 prevtags = fp.read()
196
197
197 # committed tags are stored in UTF-8
198 # committed tags are stored in UTF-8
198 writetags(fp, names, encoding.fromlocal, prevtags)
199 writetags(fp, names, encoding.fromlocal, prevtags)
199
200
200 if '.hgtags' not in self.dirstate:
201 if '.hgtags' not in self.dirstate:
201 self.add(['.hgtags'])
202 self.add(['.hgtags'])
202
203
203 m = match_.exact(self.root, '', ['.hgtags'])
204 m = match_.exact(self.root, '', ['.hgtags'])
204 tagnode = self.commit(message, user, date, extra=extra, match=m)
205 tagnode = self.commit(message, user, date, extra=extra, match=m)
205
206
206 for name in names:
207 for name in names:
207 self.hook('tag', node=hex(node), tag=name, local=local)
208 self.hook('tag', node=hex(node), tag=name, local=local)
208
209
209 return tagnode
210 return tagnode
210
211
211 def tag(self, names, node, message, local, user, date):
212 def tag(self, names, node, message, local, user, date):
212 '''tag a revision with one or more symbolic names.
213 '''tag a revision with one or more symbolic names.
213
214
214 names is a list of strings or, when adding a single tag, names may be a
215 names is a list of strings or, when adding a single tag, names may be a
215 string.
216 string.
216
217
217 if local is True, the tags are stored in a per-repository file.
218 if local is True, the tags are stored in a per-repository file.
218 otherwise, they are stored in the .hgtags file, and a new
219 otherwise, they are stored in the .hgtags file, and a new
219 changeset is committed with the change.
220 changeset is committed with the change.
220
221
221 keyword arguments:
222 keyword arguments:
222
223
223 local: whether to store tags in non-version-controlled file
224 local: whether to store tags in non-version-controlled file
224 (default False)
225 (default False)
225
226
226 message: commit message to use if committing
227 message: commit message to use if committing
227
228
228 user: name of user to use if committing
229 user: name of user to use if committing
229
230
230 date: date tuple to use if committing'''
231 date: date tuple to use if committing'''
231
232
232 for x in self.status()[:5]:
233 for x in self.status()[:5]:
233 if '.hgtags' in x:
234 if '.hgtags' in x:
234 raise util.Abort(_('working copy of .hgtags is changed '
235 raise util.Abort(_('working copy of .hgtags is changed '
235 '(please commit .hgtags manually)'))
236 '(please commit .hgtags manually)'))
236
237
237 self.tags() # instantiate the cache
238 self.tags() # instantiate the cache
238 self._tag(names, node, message, local, user, date)
239 self._tag(names, node, message, local, user, date)
239
240
240 def tags(self):
241 def tags(self):
241 '''return a mapping of tag to node'''
242 '''return a mapping of tag to node'''
242 if self._tags is None:
243 if self._tags is None:
243 (self._tags, self._tagtypes) = self._findtags()
244 (self._tags, self._tagtypes) = self._findtags()
244
245
245 return self._tags
246 return self._tags
246
247
247 def _findtags(self):
248 def _findtags(self):
248 '''Do the hard work of finding tags. Return a pair of dicts
249 '''Do the hard work of finding tags. Return a pair of dicts
249 (tags, tagtypes) where tags maps tag name to node, and tagtypes
250 (tags, tagtypes) where tags maps tag name to node, and tagtypes
250 maps tag name to a string like \'global\' or \'local\'.
251 maps tag name to a string like \'global\' or \'local\'.
251 Subclasses or extensions are free to add their own tags, but
252 Subclasses or extensions are free to add their own tags, but
252 should be aware that the returned dicts will be retained for the
253 should be aware that the returned dicts will be retained for the
253 duration of the localrepo object.'''
254 duration of the localrepo object.'''
254
255
255 # XXX what tagtype should subclasses/extensions use? Currently
256 # XXX what tagtype should subclasses/extensions use? Currently
256 # mq and bookmarks add tags, but do not set the tagtype at all.
257 # mq and bookmarks add tags, but do not set the tagtype at all.
257 # Should each extension invent its own tag type? Should there
258 # Should each extension invent its own tag type? Should there
258 # be one tagtype for all such "virtual" tags? Or is the status
259 # be one tagtype for all such "virtual" tags? Or is the status
259 # quo fine?
260 # quo fine?
260
261
261 def readtags(lines, fn):
262 '''Read tag definitions from a file (or any source of
263 lines). Return a mapping from tag name to (node, hist):
264 node is the node id from the last line read for that name,
265 and hist is the list of node ids previously associated with
266 it (in file order). All node ids are binary, not hex.'''
267
268 filetags = {} # map tag name to (node, hist)
269 count = 0
270
271 def warn(msg):
272 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
273
274 for line in lines:
275 count += 1
276 if not line:
277 continue
278 try:
279 (nodehex, name) = line.split(" ", 1)
280 except ValueError:
281 warn(_("cannot parse entry"))
282 continue
283 name = encoding.tolocal(name.strip()) # stored in UTF-8
284 try:
285 nodebin = bin(nodehex)
286 except TypeError:
287 warn(_("node '%s' is not well formed") % nodehex)
288 continue
289 if nodebin not in self.changelog.nodemap:
290 # silently ignore as pull -r might cause this
291 continue
292
293 # update filetags
294 hist = []
295 if name in filetags:
296 n, hist = filetags[name]
297 hist.append(n)
298 filetags[name] = (nodebin, hist)
299 return filetags
300
301 alltags = {} # map tag name to (node, hist)
262 alltags = {} # map tag name to (node, hist)
302 tagtypes = {}
263 tagtypes = {}
303
264
304 def updatetags(filetags, tagtype):
265 tags_.findglobaltags(self.ui, self, alltags, tagtypes)
305 '''Incorporate the tag info read from one file into the two
266 tags_.readlocaltags(self.ui, self, alltags, tagtypes)
306 dictionaries, alltags and tagtypes, that contain all tag
307 info (global across all heads plus local).'''
308
309 for name, nodehist in filetags.iteritems():
310 if name not in alltags:
311 alltags[name] = nodehist
312 tagtypes[name] = tagtype
313 continue
314
315 # we prefer alltags[name] if:
316 # it supercedes us OR
317 # mutual supercedes and it has a higher rank
318 # otherwise we win because we're tip-most
319 anode, ahist = nodehist
320 bnode, bhist = alltags[name]
321 if (bnode != anode and anode in bhist and
322 (bnode not in ahist or len(bhist) > len(ahist))):
323 anode = bnode
324 ahist.extend([n for n in bhist if n not in ahist])
325 alltags[name] = anode, ahist
326 tagtypes[name] = tagtype
327
328 seen = set()
329 fctx = None
330 ctxs = [] # list of filectx
331 for node in self.heads():
332 try:
333 fnode = self[node].filenode('.hgtags')
334 except error.LookupError:
335 continue
336 if fnode not in seen:
337 seen.add(fnode)
338 if not fctx:
339 fctx = self.filectx('.hgtags', fileid=fnode)
340 else:
341 fctx = fctx.filectx(fnode)
342 ctxs.append(fctx)
343
344 # read the tags file from each head, ending with the tip
345 for fctx in reversed(ctxs):
346 filetags = readtags(fctx.data().splitlines(), fctx)
347 updatetags(filetags, "global")
348
349 try:
350 data = encoding.fromlocal(self.opener("localtags").read())
351 # localtags are stored in the local character set
352 # while the internal tag table is stored in UTF-8
353 filetags = readtags(data.splitlines(), "localtags")
354 updatetags(filetags, "local")
355 except IOError:
356 pass
357
267
358 tags = {}
268 tags = {}
359 for (name, (node, hist)) in alltags.iteritems():
269 for (name, (node, hist)) in alltags.iteritems():
360 if node != nullid:
270 if node != nullid:
361 tags[name] = node
271 tags[name] = node
362 tags['tip'] = self.changelog.tip()
272 tags['tip'] = self.changelog.tip()
363 return (tags, tagtypes)
273 return (tags, tagtypes)
364
274
365 def tagtype(self, tagname):
275 def tagtype(self, tagname):
366 '''
276 '''
367 return the type of the given tag. result can be:
277 return the type of the given tag. result can be:
368
278
369 'local' : a local tag
279 'local' : a local tag
370 'global' : a global tag
280 'global' : a global tag
371 None : tag does not exist
281 None : tag does not exist
372 '''
282 '''
373
283
374 self.tags()
284 self.tags()
375
285
376 return self._tagtypes.get(tagname)
286 return self._tagtypes.get(tagname)
377
287
378 def tagslist(self):
288 def tagslist(self):
379 '''return a list of tags ordered by revision'''
289 '''return a list of tags ordered by revision'''
380 l = []
290 l = []
381 for t, n in self.tags().iteritems():
291 for t, n in self.tags().iteritems():
382 try:
292 try:
383 r = self.changelog.rev(n)
293 r = self.changelog.rev(n)
384 except:
294 except:
385 r = -2 # sort to the beginning of the list if unknown
295 r = -2 # sort to the beginning of the list if unknown
386 l.append((r, t, n))
296 l.append((r, t, n))
387 return [(t, n) for r, t, n in sorted(l)]
297 return [(t, n) for r, t, n in sorted(l)]
388
298
389 def nodetags(self, node):
299 def nodetags(self, node):
390 '''return the tags associated with a node'''
300 '''return the tags associated with a node'''
391 if not self.nodetagscache:
301 if not self.nodetagscache:
392 self.nodetagscache = {}
302 self.nodetagscache = {}
393 for t, n in self.tags().iteritems():
303 for t, n in self.tags().iteritems():
394 self.nodetagscache.setdefault(n, []).append(t)
304 self.nodetagscache.setdefault(n, []).append(t)
395 return self.nodetagscache.get(node, [])
305 return self.nodetagscache.get(node, [])
396
306
397 def _branchtags(self, partial, lrev):
307 def _branchtags(self, partial, lrev):
398 # TODO: rename this function?
308 # TODO: rename this function?
399 tiprev = len(self) - 1
309 tiprev = len(self) - 1
400 if lrev != tiprev:
310 if lrev != tiprev:
401 self._updatebranchcache(partial, lrev+1, tiprev+1)
311 self._updatebranchcache(partial, lrev+1, tiprev+1)
402 self._writebranchcache(partial, self.changelog.tip(), tiprev)
312 self._writebranchcache(partial, self.changelog.tip(), tiprev)
403
313
404 return partial
314 return partial
405
315
406 def branchmap(self):
316 def branchmap(self):
407 tip = self.changelog.tip()
317 tip = self.changelog.tip()
408 if self.branchcache is not None and self._branchcachetip == tip:
318 if self.branchcache is not None and self._branchcachetip == tip:
409 return self.branchcache
319 return self.branchcache
410
320
411 oldtip = self._branchcachetip
321 oldtip = self._branchcachetip
412 self._branchcachetip = tip
322 self._branchcachetip = tip
413 if self.branchcache is None:
323 if self.branchcache is None:
414 self.branchcache = {} # avoid recursion in changectx
324 self.branchcache = {} # avoid recursion in changectx
415 else:
325 else:
416 self.branchcache.clear() # keep using the same dict
326 self.branchcache.clear() # keep using the same dict
417 if oldtip is None or oldtip not in self.changelog.nodemap:
327 if oldtip is None or oldtip not in self.changelog.nodemap:
418 partial, last, lrev = self._readbranchcache()
328 partial, last, lrev = self._readbranchcache()
419 else:
329 else:
420 lrev = self.changelog.rev(oldtip)
330 lrev = self.changelog.rev(oldtip)
421 partial = self._ubranchcache
331 partial = self._ubranchcache
422
332
423 self._branchtags(partial, lrev)
333 self._branchtags(partial, lrev)
424 # this private cache holds all heads (not just tips)
334 # this private cache holds all heads (not just tips)
425 self._ubranchcache = partial
335 self._ubranchcache = partial
426
336
427 # the branch cache is stored on disk as UTF-8, but in the local
337 # the branch cache is stored on disk as UTF-8, but in the local
428 # charset internally
338 # charset internally
429 for k, v in partial.iteritems():
339 for k, v in partial.iteritems():
430 self.branchcache[encoding.tolocal(k)] = v
340 self.branchcache[encoding.tolocal(k)] = v
431 return self.branchcache
341 return self.branchcache
432
342
433
343
434 def branchtags(self):
344 def branchtags(self):
435 '''return a dict where branch names map to the tipmost head of
345 '''return a dict where branch names map to the tipmost head of
436 the branch, open heads come before closed'''
346 the branch, open heads come before closed'''
437 bt = {}
347 bt = {}
438 for bn, heads in self.branchmap().iteritems():
348 for bn, heads in self.branchmap().iteritems():
439 head = None
349 head = None
440 for i in range(len(heads)-1, -1, -1):
350 for i in range(len(heads)-1, -1, -1):
441 h = heads[i]
351 h = heads[i]
442 if 'close' not in self.changelog.read(h)[5]:
352 if 'close' not in self.changelog.read(h)[5]:
443 head = h
353 head = h
444 break
354 break
445 # no open heads were found
355 # no open heads were found
446 if head is None:
356 if head is None:
447 head = heads[-1]
357 head = heads[-1]
448 bt[bn] = head
358 bt[bn] = head
449 return bt
359 return bt
450
360
451
361
452 def _readbranchcache(self):
362 def _readbranchcache(self):
453 partial = {}
363 partial = {}
454 try:
364 try:
455 f = self.opener("branchheads.cache")
365 f = self.opener("branchheads.cache")
456 lines = f.read().split('\n')
366 lines = f.read().split('\n')
457 f.close()
367 f.close()
458 except (IOError, OSError):
368 except (IOError, OSError):
459 return {}, nullid, nullrev
369 return {}, nullid, nullrev
460
370
461 try:
371 try:
462 last, lrev = lines.pop(0).split(" ", 1)
372 last, lrev = lines.pop(0).split(" ", 1)
463 last, lrev = bin(last), int(lrev)
373 last, lrev = bin(last), int(lrev)
464 if lrev >= len(self) or self[lrev].node() != last:
374 if lrev >= len(self) or self[lrev].node() != last:
465 # invalidate the cache
375 # invalidate the cache
466 raise ValueError('invalidating branch cache (tip differs)')
376 raise ValueError('invalidating branch cache (tip differs)')
467 for l in lines:
377 for l in lines:
468 if not l: continue
378 if not l: continue
469 node, label = l.split(" ", 1)
379 node, label = l.split(" ", 1)
470 partial.setdefault(label.strip(), []).append(bin(node))
380 partial.setdefault(label.strip(), []).append(bin(node))
471 except KeyboardInterrupt:
381 except KeyboardInterrupt:
472 raise
382 raise
473 except Exception, inst:
383 except Exception, inst:
474 if self.ui.debugflag:
384 if self.ui.debugflag:
475 self.ui.warn(str(inst), '\n')
385 self.ui.warn(str(inst), '\n')
476 partial, last, lrev = {}, nullid, nullrev
386 partial, last, lrev = {}, nullid, nullrev
477 return partial, last, lrev
387 return partial, last, lrev
478
388
479 def _writebranchcache(self, branches, tip, tiprev):
389 def _writebranchcache(self, branches, tip, tiprev):
480 try:
390 try:
481 f = self.opener("branchheads.cache", "w", atomictemp=True)
391 f = self.opener("branchheads.cache", "w", atomictemp=True)
482 f.write("%s %s\n" % (hex(tip), tiprev))
392 f.write("%s %s\n" % (hex(tip), tiprev))
483 for label, nodes in branches.iteritems():
393 for label, nodes in branches.iteritems():
484 for node in nodes:
394 for node in nodes:
485 f.write("%s %s\n" % (hex(node), label))
395 f.write("%s %s\n" % (hex(node), label))
486 f.rename()
396 f.rename()
487 except (IOError, OSError):
397 except (IOError, OSError):
488 pass
398 pass
489
399
490 def _updatebranchcache(self, partial, start, end):
400 def _updatebranchcache(self, partial, start, end):
491 # collect new branch entries
401 # collect new branch entries
492 newbranches = {}
402 newbranches = {}
493 for r in xrange(start, end):
403 for r in xrange(start, end):
494 c = self[r]
404 c = self[r]
495 newbranches.setdefault(c.branch(), []).append(c.node())
405 newbranches.setdefault(c.branch(), []).append(c.node())
496 # if older branchheads are reachable from new ones, they aren't
406 # if older branchheads are reachable from new ones, they aren't
497 # really branchheads. Note checking parents is insufficient:
407 # really branchheads. Note checking parents is insufficient:
498 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
408 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
499 for branch, newnodes in newbranches.iteritems():
409 for branch, newnodes in newbranches.iteritems():
500 bheads = partial.setdefault(branch, [])
410 bheads = partial.setdefault(branch, [])
501 bheads.extend(newnodes)
411 bheads.extend(newnodes)
502 if len(bheads) < 2:
412 if len(bheads) < 2:
503 continue
413 continue
504 newbheads = []
414 newbheads = []
505 # starting from tip means fewer passes over reachable
415 # starting from tip means fewer passes over reachable
506 while newnodes:
416 while newnodes:
507 latest = newnodes.pop()
417 latest = newnodes.pop()
508 if latest not in bheads:
418 if latest not in bheads:
509 continue
419 continue
510 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
420 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
511 reachable = self.changelog.reachable(latest, minbhrev)
421 reachable = self.changelog.reachable(latest, minbhrev)
512 bheads = [b for b in bheads if b not in reachable]
422 bheads = [b for b in bheads if b not in reachable]
513 newbheads.insert(0, latest)
423 newbheads.insert(0, latest)
514 bheads.extend(newbheads)
424 bheads.extend(newbheads)
515 partial[branch] = bheads
425 partial[branch] = bheads
516
426
517 def lookup(self, key):
427 def lookup(self, key):
518 if isinstance(key, int):
428 if isinstance(key, int):
519 return self.changelog.node(key)
429 return self.changelog.node(key)
520 elif key == '.':
430 elif key == '.':
521 return self.dirstate.parents()[0]
431 return self.dirstate.parents()[0]
522 elif key == 'null':
432 elif key == 'null':
523 return nullid
433 return nullid
524 elif key == 'tip':
434 elif key == 'tip':
525 return self.changelog.tip()
435 return self.changelog.tip()
526 n = self.changelog._match(key)
436 n = self.changelog._match(key)
527 if n:
437 if n:
528 return n
438 return n
529 if key in self.tags():
439 if key in self.tags():
530 return self.tags()[key]
440 return self.tags()[key]
531 if key in self.branchtags():
441 if key in self.branchtags():
532 return self.branchtags()[key]
442 return self.branchtags()[key]
533 n = self.changelog._partialmatch(key)
443 n = self.changelog._partialmatch(key)
534 if n:
444 if n:
535 return n
445 return n
536
446
537 # can't find key, check if it might have come from damaged dirstate
447 # can't find key, check if it might have come from damaged dirstate
538 if key in self.dirstate.parents():
448 if key in self.dirstate.parents():
539 raise error.Abort(_("working directory has unknown parent '%s'!")
449 raise error.Abort(_("working directory has unknown parent '%s'!")
540 % short(key))
450 % short(key))
541 try:
451 try:
542 if len(key) == 20:
452 if len(key) == 20:
543 key = hex(key)
453 key = hex(key)
544 except:
454 except:
545 pass
455 pass
546 raise error.RepoError(_("unknown revision '%s'") % key)
456 raise error.RepoError(_("unknown revision '%s'") % key)
547
457
548 def local(self):
458 def local(self):
549 return True
459 return True
550
460
551 def join(self, f):
461 def join(self, f):
552 return os.path.join(self.path, f)
462 return os.path.join(self.path, f)
553
463
554 def wjoin(self, f):
464 def wjoin(self, f):
555 return os.path.join(self.root, f)
465 return os.path.join(self.root, f)
556
466
557 def rjoin(self, f):
467 def rjoin(self, f):
558 return os.path.join(self.root, util.pconvert(f))
468 return os.path.join(self.root, util.pconvert(f))
559
469
560 def file(self, f):
470 def file(self, f):
561 if f[0] == '/':
471 if f[0] == '/':
562 f = f[1:]
472 f = f[1:]
563 return filelog.filelog(self.sopener, f)
473 return filelog.filelog(self.sopener, f)
564
474
565 def changectx(self, changeid):
475 def changectx(self, changeid):
566 return self[changeid]
476 return self[changeid]
567
477
568 def parents(self, changeid=None):
478 def parents(self, changeid=None):
569 '''get list of changectxs for parents of changeid'''
479 '''get list of changectxs for parents of changeid'''
570 return self[changeid].parents()
480 return self[changeid].parents()
571
481
572 def filectx(self, path, changeid=None, fileid=None):
482 def filectx(self, path, changeid=None, fileid=None):
573 """changeid can be a changeset revision, node, or tag.
483 """changeid can be a changeset revision, node, or tag.
574 fileid can be a file revision or node."""
484 fileid can be a file revision or node."""
575 return context.filectx(self, path, changeid, fileid)
485 return context.filectx(self, path, changeid, fileid)
576
486
577 def getcwd(self):
487 def getcwd(self):
578 return self.dirstate.getcwd()
488 return self.dirstate.getcwd()
579
489
580 def pathto(self, f, cwd=None):
490 def pathto(self, f, cwd=None):
581 return self.dirstate.pathto(f, cwd)
491 return self.dirstate.pathto(f, cwd)
582
492
583 def wfile(self, f, mode='r'):
493 def wfile(self, f, mode='r'):
584 return self.wopener(f, mode)
494 return self.wopener(f, mode)
585
495
586 def _link(self, f):
496 def _link(self, f):
587 return os.path.islink(self.wjoin(f))
497 return os.path.islink(self.wjoin(f))
588
498
589 def _filter(self, filter, filename, data):
499 def _filter(self, filter, filename, data):
590 if filter not in self.filterpats:
500 if filter not in self.filterpats:
591 l = []
501 l = []
592 for pat, cmd in self.ui.configitems(filter):
502 for pat, cmd in self.ui.configitems(filter):
593 if cmd == '!':
503 if cmd == '!':
594 continue
504 continue
595 mf = match_.match(self.root, '', [pat])
505 mf = match_.match(self.root, '', [pat])
596 fn = None
506 fn = None
597 params = cmd
507 params = cmd
598 for name, filterfn in self._datafilters.iteritems():
508 for name, filterfn in self._datafilters.iteritems():
599 if cmd.startswith(name):
509 if cmd.startswith(name):
600 fn = filterfn
510 fn = filterfn
601 params = cmd[len(name):].lstrip()
511 params = cmd[len(name):].lstrip()
602 break
512 break
603 if not fn:
513 if not fn:
604 fn = lambda s, c, **kwargs: util.filter(s, c)
514 fn = lambda s, c, **kwargs: util.filter(s, c)
605 # Wrap old filters not supporting keyword arguments
515 # Wrap old filters not supporting keyword arguments
606 if not inspect.getargspec(fn)[2]:
516 if not inspect.getargspec(fn)[2]:
607 oldfn = fn
517 oldfn = fn
608 fn = lambda s, c, **kwargs: oldfn(s, c)
518 fn = lambda s, c, **kwargs: oldfn(s, c)
609 l.append((mf, fn, params))
519 l.append((mf, fn, params))
610 self.filterpats[filter] = l
520 self.filterpats[filter] = l
611
521
612 for mf, fn, cmd in self.filterpats[filter]:
522 for mf, fn, cmd in self.filterpats[filter]:
613 if mf(filename):
523 if mf(filename):
614 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
524 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
615 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
525 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
616 break
526 break
617
527
618 return data
528 return data
619
529
620 def adddatafilter(self, name, filter):
530 def adddatafilter(self, name, filter):
621 self._datafilters[name] = filter
531 self._datafilters[name] = filter
622
532
623 def wread(self, filename):
533 def wread(self, filename):
624 if self._link(filename):
534 if self._link(filename):
625 data = os.readlink(self.wjoin(filename))
535 data = os.readlink(self.wjoin(filename))
626 else:
536 else:
627 data = self.wopener(filename, 'r').read()
537 data = self.wopener(filename, 'r').read()
628 return self._filter("encode", filename, data)
538 return self._filter("encode", filename, data)
629
539
630 def wwrite(self, filename, data, flags):
540 def wwrite(self, filename, data, flags):
631 data = self._filter("decode", filename, data)
541 data = self._filter("decode", filename, data)
632 try:
542 try:
633 os.unlink(self.wjoin(filename))
543 os.unlink(self.wjoin(filename))
634 except OSError:
544 except OSError:
635 pass
545 pass
636 if 'l' in flags:
546 if 'l' in flags:
637 self.wopener.symlink(data, filename)
547 self.wopener.symlink(data, filename)
638 else:
548 else:
639 self.wopener(filename, 'w').write(data)
549 self.wopener(filename, 'w').write(data)
640 if 'x' in flags:
550 if 'x' in flags:
641 util.set_flags(self.wjoin(filename), False, True)
551 util.set_flags(self.wjoin(filename), False, True)
642
552
643 def wwritedata(self, filename, data):
553 def wwritedata(self, filename, data):
644 return self._filter("decode", filename, data)
554 return self._filter("decode", filename, data)
645
555
646 def transaction(self):
556 def transaction(self):
647 tr = self._transref and self._transref() or None
557 tr = self._transref and self._transref() or None
648 if tr and tr.running():
558 if tr and tr.running():
649 return tr.nest()
559 return tr.nest()
650
560
651 # abort here if the journal already exists
561 # abort here if the journal already exists
652 if os.path.exists(self.sjoin("journal")):
562 if os.path.exists(self.sjoin("journal")):
653 raise error.RepoError(_("journal already exists - run hg recover"))
563 raise error.RepoError(_("journal already exists - run hg recover"))
654
564
655 # save dirstate for rollback
565 # save dirstate for rollback
656 try:
566 try:
657 ds = self.opener("dirstate").read()
567 ds = self.opener("dirstate").read()
658 except IOError:
568 except IOError:
659 ds = ""
569 ds = ""
660 self.opener("journal.dirstate", "w").write(ds)
570 self.opener("journal.dirstate", "w").write(ds)
661 self.opener("journal.branch", "w").write(self.dirstate.branch())
571 self.opener("journal.branch", "w").write(self.dirstate.branch())
662
572
663 renames = [(self.sjoin("journal"), self.sjoin("undo")),
573 renames = [(self.sjoin("journal"), self.sjoin("undo")),
664 (self.join("journal.dirstate"), self.join("undo.dirstate")),
574 (self.join("journal.dirstate"), self.join("undo.dirstate")),
665 (self.join("journal.branch"), self.join("undo.branch"))]
575 (self.join("journal.branch"), self.join("undo.branch"))]
666 tr = transaction.transaction(self.ui.warn, self.sopener,
576 tr = transaction.transaction(self.ui.warn, self.sopener,
667 self.sjoin("journal"),
577 self.sjoin("journal"),
668 aftertrans(renames),
578 aftertrans(renames),
669 self.store.createmode)
579 self.store.createmode)
670 self._transref = weakref.ref(tr)
580 self._transref = weakref.ref(tr)
671 return tr
581 return tr
672
582
673 def recover(self):
583 def recover(self):
674 lock = self.lock()
584 lock = self.lock()
675 try:
585 try:
676 if os.path.exists(self.sjoin("journal")):
586 if os.path.exists(self.sjoin("journal")):
677 self.ui.status(_("rolling back interrupted transaction\n"))
587 self.ui.status(_("rolling back interrupted transaction\n"))
678 transaction.rollback(self.sopener, self.sjoin("journal"), self.ui.warn)
588 transaction.rollback(self.sopener, self.sjoin("journal"), self.ui.warn)
679 self.invalidate()
589 self.invalidate()
680 return True
590 return True
681 else:
591 else:
682 self.ui.warn(_("no interrupted transaction available\n"))
592 self.ui.warn(_("no interrupted transaction available\n"))
683 return False
593 return False
684 finally:
594 finally:
685 lock.release()
595 lock.release()
686
596
687 def rollback(self):
597 def rollback(self):
688 wlock = lock = None
598 wlock = lock = None
689 try:
599 try:
690 wlock = self.wlock()
600 wlock = self.wlock()
691 lock = self.lock()
601 lock = self.lock()
692 if os.path.exists(self.sjoin("undo")):
602 if os.path.exists(self.sjoin("undo")):
693 self.ui.status(_("rolling back last transaction\n"))
603 self.ui.status(_("rolling back last transaction\n"))
694 transaction.rollback(self.sopener, self.sjoin("undo"), self.ui.warn)
604 transaction.rollback(self.sopener, self.sjoin("undo"), self.ui.warn)
695 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
605 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
696 try:
606 try:
697 branch = self.opener("undo.branch").read()
607 branch = self.opener("undo.branch").read()
698 self.dirstate.setbranch(branch)
608 self.dirstate.setbranch(branch)
699 except IOError:
609 except IOError:
700 self.ui.warn(_("Named branch could not be reset, "
610 self.ui.warn(_("Named branch could not be reset, "
701 "current branch still is: %s\n")
611 "current branch still is: %s\n")
702 % encoding.tolocal(self.dirstate.branch()))
612 % encoding.tolocal(self.dirstate.branch()))
703 self.invalidate()
613 self.invalidate()
704 self.dirstate.invalidate()
614 self.dirstate.invalidate()
705 else:
615 else:
706 self.ui.warn(_("no rollback information available\n"))
616 self.ui.warn(_("no rollback information available\n"))
707 finally:
617 finally:
708 release(lock, wlock)
618 release(lock, wlock)
709
619
710 def invalidate(self):
620 def invalidate(self):
711 for a in "changelog manifest".split():
621 for a in "changelog manifest".split():
712 if a in self.__dict__:
622 if a in self.__dict__:
713 delattr(self, a)
623 delattr(self, a)
714 self._tags = None
624 self._tags = None
715 self._tagtypes = None
625 self._tagtypes = None
716 self.nodetagscache = None
626 self.nodetagscache = None
717 self.branchcache = None
627 self.branchcache = None
718 self._ubranchcache = None
628 self._ubranchcache = None
719 self._branchcachetip = None
629 self._branchcachetip = None
720
630
721 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
631 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
722 try:
632 try:
723 l = lock.lock(lockname, 0, releasefn, desc=desc)
633 l = lock.lock(lockname, 0, releasefn, desc=desc)
724 except error.LockHeld, inst:
634 except error.LockHeld, inst:
725 if not wait:
635 if not wait:
726 raise
636 raise
727 self.ui.warn(_("waiting for lock on %s held by %r\n") %
637 self.ui.warn(_("waiting for lock on %s held by %r\n") %
728 (desc, inst.locker))
638 (desc, inst.locker))
729 # default to 600 seconds timeout
639 # default to 600 seconds timeout
730 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
640 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
731 releasefn, desc=desc)
641 releasefn, desc=desc)
732 if acquirefn:
642 if acquirefn:
733 acquirefn()
643 acquirefn()
734 return l
644 return l
735
645
736 def lock(self, wait=True):
646 def lock(self, wait=True):
737 l = self._lockref and self._lockref()
647 l = self._lockref and self._lockref()
738 if l is not None and l.held:
648 if l is not None and l.held:
739 l.lock()
649 l.lock()
740 return l
650 return l
741
651
742 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
652 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
743 _('repository %s') % self.origroot)
653 _('repository %s') % self.origroot)
744 self._lockref = weakref.ref(l)
654 self._lockref = weakref.ref(l)
745 return l
655 return l
746
656
747 def wlock(self, wait=True):
657 def wlock(self, wait=True):
748 l = self._wlockref and self._wlockref()
658 l = self._wlockref and self._wlockref()
749 if l is not None and l.held:
659 if l is not None and l.held:
750 l.lock()
660 l.lock()
751 return l
661 return l
752
662
753 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
663 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
754 self.dirstate.invalidate, _('working directory of %s') %
664 self.dirstate.invalidate, _('working directory of %s') %
755 self.origroot)
665 self.origroot)
756 self._wlockref = weakref.ref(l)
666 self._wlockref = weakref.ref(l)
757 return l
667 return l
758
668
759 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
669 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
760 """
670 """
761 commit an individual file as part of a larger transaction
671 commit an individual file as part of a larger transaction
762 """
672 """
763
673
764 fname = fctx.path()
674 fname = fctx.path()
765 text = fctx.data()
675 text = fctx.data()
766 flog = self.file(fname)
676 flog = self.file(fname)
767 fparent1 = manifest1.get(fname, nullid)
677 fparent1 = manifest1.get(fname, nullid)
768 fparent2 = fparent2o = manifest2.get(fname, nullid)
678 fparent2 = fparent2o = manifest2.get(fname, nullid)
769
679
770 meta = {}
680 meta = {}
771 copy = fctx.renamed()
681 copy = fctx.renamed()
772 if copy and copy[0] != fname:
682 if copy and copy[0] != fname:
773 # Mark the new revision of this file as a copy of another
683 # Mark the new revision of this file as a copy of another
774 # file. This copy data will effectively act as a parent
684 # file. This copy data will effectively act as a parent
775 # of this new revision. If this is a merge, the first
685 # of this new revision. If this is a merge, the first
776 # parent will be the nullid (meaning "look up the copy data")
686 # parent will be the nullid (meaning "look up the copy data")
777 # and the second one will be the other parent. For example:
687 # and the second one will be the other parent. For example:
778 #
688 #
779 # 0 --- 1 --- 3 rev1 changes file foo
689 # 0 --- 1 --- 3 rev1 changes file foo
780 # \ / rev2 renames foo to bar and changes it
690 # \ / rev2 renames foo to bar and changes it
781 # \- 2 -/ rev3 should have bar with all changes and
691 # \- 2 -/ rev3 should have bar with all changes and
782 # should record that bar descends from
692 # should record that bar descends from
783 # bar in rev2 and foo in rev1
693 # bar in rev2 and foo in rev1
784 #
694 #
785 # this allows this merge to succeed:
695 # this allows this merge to succeed:
786 #
696 #
787 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
697 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
788 # \ / merging rev3 and rev4 should use bar@rev2
698 # \ / merging rev3 and rev4 should use bar@rev2
789 # \- 2 --- 4 as the merge base
699 # \- 2 --- 4 as the merge base
790 #
700 #
791
701
792 cfname = copy[0]
702 cfname = copy[0]
793 crev = manifest1.get(cfname)
703 crev = manifest1.get(cfname)
794 newfparent = fparent2
704 newfparent = fparent2
795
705
796 if manifest2: # branch merge
706 if manifest2: # branch merge
797 if fparent2 == nullid or crev is None: # copied on remote side
707 if fparent2 == nullid or crev is None: # copied on remote side
798 if cfname in manifest2:
708 if cfname in manifest2:
799 crev = manifest2[cfname]
709 crev = manifest2[cfname]
800 newfparent = fparent1
710 newfparent = fparent1
801
711
802 # find source in nearest ancestor if we've lost track
712 # find source in nearest ancestor if we've lost track
803 if not crev:
713 if not crev:
804 self.ui.debug(_(" %s: searching for copy revision for %s\n") %
714 self.ui.debug(_(" %s: searching for copy revision for %s\n") %
805 (fname, cfname))
715 (fname, cfname))
806 for ancestor in self['.'].ancestors():
716 for ancestor in self['.'].ancestors():
807 if cfname in ancestor:
717 if cfname in ancestor:
808 crev = ancestor[cfname].filenode()
718 crev = ancestor[cfname].filenode()
809 break
719 break
810
720
811 self.ui.debug(_(" %s: copy %s:%s\n") % (fname, cfname, hex(crev)))
721 self.ui.debug(_(" %s: copy %s:%s\n") % (fname, cfname, hex(crev)))
812 meta["copy"] = cfname
722 meta["copy"] = cfname
813 meta["copyrev"] = hex(crev)
723 meta["copyrev"] = hex(crev)
814 fparent1, fparent2 = nullid, newfparent
724 fparent1, fparent2 = nullid, newfparent
815 elif fparent2 != nullid:
725 elif fparent2 != nullid:
816 # is one parent an ancestor of the other?
726 # is one parent an ancestor of the other?
817 fparentancestor = flog.ancestor(fparent1, fparent2)
727 fparentancestor = flog.ancestor(fparent1, fparent2)
818 if fparentancestor == fparent1:
728 if fparentancestor == fparent1:
819 fparent1, fparent2 = fparent2, nullid
729 fparent1, fparent2 = fparent2, nullid
820 elif fparentancestor == fparent2:
730 elif fparentancestor == fparent2:
821 fparent2 = nullid
731 fparent2 = nullid
822
732
823 # is the file changed?
733 # is the file changed?
824 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
734 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
825 changelist.append(fname)
735 changelist.append(fname)
826 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
736 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
827
737
828 # are just the flags changed during merge?
738 # are just the flags changed during merge?
829 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
739 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
830 changelist.append(fname)
740 changelist.append(fname)
831
741
832 return fparent1
742 return fparent1
833
743
834 def commit(self, text="", user=None, date=None, match=None, force=False,
744 def commit(self, text="", user=None, date=None, match=None, force=False,
835 editor=False, extra={}):
745 editor=False, extra={}):
836 """Add a new revision to current repository.
746 """Add a new revision to current repository.
837
747
838 Revision information is gathered from the working directory,
748 Revision information is gathered from the working directory,
839 match can be used to filter the committed files. If editor is
749 match can be used to filter the committed files. If editor is
840 supplied, it is called to get a commit message.
750 supplied, it is called to get a commit message.
841 """
751 """
842
752
843 def fail(f, msg):
753 def fail(f, msg):
844 raise util.Abort('%s: %s' % (f, msg))
754 raise util.Abort('%s: %s' % (f, msg))
845
755
846 if not match:
756 if not match:
847 match = match_.always(self.root, '')
757 match = match_.always(self.root, '')
848
758
849 if not force:
759 if not force:
850 vdirs = []
760 vdirs = []
851 match.dir = vdirs.append
761 match.dir = vdirs.append
852 match.bad = fail
762 match.bad = fail
853
763
854 wlock = self.wlock()
764 wlock = self.wlock()
855 try:
765 try:
856 p1, p2 = self.dirstate.parents()
766 p1, p2 = self.dirstate.parents()
857 wctx = self[None]
767 wctx = self[None]
858
768
859 if (not force and p2 != nullid and match and
769 if (not force and p2 != nullid and match and
860 (match.files() or match.anypats())):
770 (match.files() or match.anypats())):
861 raise util.Abort(_('cannot partially commit a merge '
771 raise util.Abort(_('cannot partially commit a merge '
862 '(do not specify files or patterns)'))
772 '(do not specify files or patterns)'))
863
773
            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            for s in wctx.substate:
                if match(s) and wctx.sub(s).dirty():
                    subs.append(s)
            if subs and '.hgsubstate' not in changes[0]:
                changes[0].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and p2 == nullid
                and not (changes[0] or changes[1] or changes[2])
                and self[None].branch() == self['.'].branch()):
                return None

            ms = merge_.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg resolve)"))

            cctx = context.workingctx(self, (p1, p2), text, user, date,
                                      extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)

            # commit subs
            if subs:
                state = wctx.substate.copy()
                for s in subs:
                    self.ui.status(_('committing subrepository %s\n') % s)
                    sr = wctx.sub(s).commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            ret = self.commitctx(cctx, True)

            # update dirstate and mergestate
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.forget(f)
            self.dirstate.setparents(ret)
            ms.reset()

            return ret

        finally:
            wlock.release()

    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.

        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = ctx.removed()
        p1, p2 = ctx.p1(), ctx.p2()
        m1 = p1.manifest().copy()
        m2 = p2.manifest()
        user = ctx.user()

        xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        lock = self.lock()
        try:
            tr = self.transaction()
            trp = weakref.proxy(tr)
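            # Note: trp is a weak proxy, so references to it held elsewhere
            # do not keep the transaction alive; 'del tr' in the finally
            # clause below can then destroy the transaction and roll back
            # anything uncommitted (likely the point of using weakref here).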

            # check in files
            new = {}
            changed = []
            linkrev = len(self)
            for f in sorted(ctx.modified() + ctx.added()):
                self.ui.note(f + "\n")
                try:
                    fctx = ctx[f]
                    new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                              changed)
                    m1.set(f, fctx.flags())
                except (OSError, IOError):
                    if error:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        removed.append(f)

            # update manifest
            m1.update(new)
            removed = [f for f in sorted(removed) if f in m1 or f in m2]
            drop = [f for f in removed if f in m1]
            for f in drop:
                del m1[f]
            mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                   p2.manifestnode(), (new, drop))

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, changed + removed, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
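            # The 'pending' callable lets pretxncommit hooks see the new
            # revision before the transaction closes: writepending() flushes
            # the delayed changelog data to a pending file, and the lambda
            # returns the repo root (else "") for the hook machinery.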
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self.branchcache:
                self.branchtags()

            self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
            return n
        finally:
            del tr
            lock.release()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

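        # The result is a 7-tuple of sorted file lists:
        # (modified, added, removed, deleted, unknown, ignored, clean).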
        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or match_.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            s = self.dirstate.status(match, listignored, listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f].data())):
                        modified.append(f)
                    else:
                        fixup.append(f)

                if listclean:
                    clean += fixup

                # update dirstate for files that are actually clean
                if fixup:
                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean
        [l.sort() for l in r]
        return r

    def add(self, list):
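        # dirstate states referenced below: 'a' added, 'm' needs merging,
        # 'n' normal (tracked), 'r' removed, and '?' for untracked files.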
        wlock = self.wlock()
        try:
            rejected = []
            for f in list:
                p = self.wjoin(f)
                try:
                    st = os.lstat(p)
                except OSError:
                    self.ui.warn(_("%s does not exist!\n") % f)
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    self.ui.warn(_("%s: files over 10MB may cause memory and"
                                   " performance problems\n"
                                   "(use 'hg revert %s' to unadd the file)\n")
                                   % (f, f))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    self.ui.warn(_("%s not added: only files and symlinks "
                                   "supported currently\n") % f)
                    rejected.append(f)
                elif self.dirstate[f] in 'amn':
                    self.ui.warn(_("%s already tracked!\n") % f)
                elif self.dirstate[f] == 'r':
                    self.dirstate.normallookup(f)
                else:
                    self.dirstate.add(f)
            return rejected
        finally:
            wlock.release()

    def forget(self, list):
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'a':
                    self.ui.warn(_("%s not added!\n") % f)
                else:
                    self.dirstate.forget(f)
        finally:
            wlock.release()

    def remove(self, list, unlink=False):
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    if inst.errno != errno.ENOENT:
                        raise
        wlock = self.wlock()
        try:
            for f in list:
                if unlink and os.path.exists(self.wjoin(f)):
                    self.ui.warn(_("%s still exists!\n") % f)
                elif self.dirstate[f] == 'a':
                    self.dirstate.forget(f)
                elif f not in self.dirstate:
                    self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    self.dirstate.remove(f)
        finally:
            wlock.release()

    def undelete(self, list):
        manifests = [self.manifest.read(self.changelog.read(p)[0])
                     for p in self.dirstate.parents() if p != nullid]
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'r':
                    self.ui.warn(_("%s not removed!\n") % f)
                else:
                    m = f in manifests[0] and manifests[0] or manifests[1]
                    t = self.file(f).read(m[f])
                    self.wwrite(f, t, m.flags(f))
                    self.dirstate.normal(f)
        finally:
            wlock.release()

    def copy(self, source, dest):
        p = self.wjoin(dest)
        if not (os.path.exists(p) or os.path.islink(p)):
            self.ui.warn(_("%s does not exist!\n") % dest)
        elif not (os.path.isfile(p) or os.path.islink(p)):
            self.ui.warn(_("copy failed: %s is not a file or a "
                           "symbolic link\n") % dest)
        else:
            wlock = self.wlock()
            try:
                if self.dirstate[dest] in '?r':
                    self.dirstate.add(dest)
                self.dirstate.copy(source, dest)
            finally:
                wlock.release()

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads]
        return [n for (r, n) in sorted(heads)]

    def branchheads(self, branch=None, start=None, closed=False):
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        bheads = branches[branch]
        # the cache returns heads ordered lowest to highest
        bheads.reverse()
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            bheads = self.changelog.nodesbetween([start], bheads)[2]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
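        # Each entry is a 4-tuple describing a linear segment of history:
        # (tip of the segment as requested, earliest node reached by walking
        # first parents, and that node's two parents).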
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

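        # For each (top, bottom) pair, sample the first-parent chain at
        # exponentially growing distances from top (1, 2, 4, 8, ...); the
        # discovery code in findcommonincoming uses these samples to run a
        # binary search over each unknown branch segment.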
        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exist
        in self and remote but none of whose children exist in both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        return self.findcommonincoming(remote, base, heads, force)[1]

    def findcommonincoming(self, remote, base=None, heads=None, force=False):
        """Return a tuple (common, missing roots, heads) used to identify
        missing nodes from remote.

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exist
        in self and remote but none of whose children exist in both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        """
        m = self.changelog.nodemap
        search = []
        fetch = set()
        seen = set()
        seenbranch = set()
        if base is None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid], [nullid], list(heads)
            return [nullid], [], []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        heads = unknown
        if not unknown:
            return base.keys(), [], []

        req = set(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n[0:2]) # schedule branch range for scanning
                    seenbranch.add(n)
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch.add(n[1]) # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req.add(p)
                seen.add(n[0])

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
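        # Each entry in 'search' is an unknown (head, base) segment. The
        # samples returned by remote.between() locate the boundary: hitting
        # a known node at sample distance f <= 2 pins down a fetch root,
        # otherwise the segment is narrowed to (p, i) and re-queued.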
        while search:
            newsearch = []
            reqcnt += 1
            for n, l in zip(search, remote.between(search)):
                l.append(n[1])
                p = n[0]
                f = 1
                for i in l:
                    self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                    if i in m:
                        if f <= 2:
                            self.ui.debug(_("found new branch changeset %s\n") %
                                          short(p))
                            fetch.add(p)
                            base[i] = 1
                        else:
                            self.ui.debug(_("narrowed branch search to %s:%s\n")
                                          % (short(p), short(i)))
                            newsearch.append((p, i))
                        break
                    p, f = i, f * 2
            search = newsearch

        # sanity check our fetch list
        for f in fetch:
            if f in m:
                raise error.RepoError(_("already have changeset ")
                                      + short(f))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return base.keys(), list(fetch), heads

    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base is None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = set(self.changelog.nodemap)

        # prune everything remote has from the tree
        remain.remove(nullid)
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                remain.remove(n)
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = set()
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
                if heads:
                    if p1 in heads:
                        updated_heads.add(p1)
                    if p2 in heads:
                        updated_heads.add(p2)

        # this is the set of all roots we have to push
        if heads:
            return subset, list(updated_heads)
        else:
            return subset

    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
                                                            force=force)
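            # findcommonincoming returns (common nodes, roots of the missing
            # sets, remote heads); fetch == [nullid] means there is no common
            # ancestry, so the entire remote history is requested.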
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None and remote.capable('changegroupsubset'):
                heads = rheads

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if not remote.capable('changegroupsubset'):
                    raise util.Abort(_("Partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            lock.release()

    def push(self, remote, force=False, revs=None):
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

    def prepush(self, remote, force, revs):
        common = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, common, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, common, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        def checkbranch(lheads, rheads, updatelh):
            '''
            check whether there are more local heads than remote heads on
            a specific branch.

            lheads: local branch heads
            rheads: remote branch heads
            updatelh: outgoing local branch heads
            '''

            warn = 0

            if not revs and len(lheads) > len(rheads):
                warn = 1
            else:
                updatelheads = [self.changelog.heads(x, lheads)
                                for x in updatelh]
                newheads = set(sum(updatelheads, [])) & set(lheads)

                if not newheads:
                    return True

                for r in rheads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.add(r)
                    else:
                        newheads.add(r)
                if len(newheads) > len(rheads):
                    warn = 1

            if warn:
                if not rheads: # new branch requires --force
                    self.ui.warn(_("abort: push creates new"
                                   " remote branch '%s'!\n") %
                                 self[updatelh[0]].branch())
                else:
                    self.ui.warn(_("abort: push creates new remote heads!\n"))

                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return False
            return True

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # Check for each named branch if we're creating new remote heads.
            # To be a remote head after push, node must be either:
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head
            #
            # New named branches cannot be created without --force.

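            # 'localhds' maps branch name -> list of local head nodes for
            # that branch; it is compared against the remote's branchmap()
            # one branch at a time below.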
            if remote_heads != [nullid]:
                if remote.capable('branchmap'):
                    localhds = {}
                    if not revs:
                        localhds = self.branchmap()
                    else:
                        for n in heads:
                            branch = self[n].branch()
                            if branch in localhds:
                                localhds[branch].append(n)
                            else:
                                localhds[branch] = [n]

                    remotehds = remote.branchmap()

                    for lh in localhds:
                        if lh in remotehds:
                            rheads = remotehds[lh]
                        else:
                            rheads = []
                        lheads = localhds[lh]
                        updatelh = [upd for upd in update
                                    if self[upd].branch() == lh]
                        if not updatelh:
                            continue
                        if not checkbranch(lheads, rheads, updatelh):
                            return None, 0
                else:
                    if not checkbranch(heads, remote_heads, update):
                        return None, 0

                if inc:
                    self.ui.warn(_("note: unsynced remote changes!\n"))


        if revs is None:
            # use the fast path, no race possible on push
            cg = self._changegroup(common.keys(), 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads

    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()
        try:
            ret = self.prepush(remote, force, revs)
            if ret[0] is not None:
                cg, remote_heads = ret
                return remote.addchangegroup(cg, 'push', self.url())
            return ret[1]
        finally:
            lock.release()

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push. once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force: remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug(_("list of changesets:\n"))
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """This function generates a changegroup consisting of all the nodes
        that are descendants of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument. It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.
        """

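        # An illustrative (hypothetical) extranodes value:
        #   {'foo.txt': [(somefilenode, somelinknode)],
        #    1: [(somemanifestnode, somelinknode)]}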
        if extranodes is None:
            # can we go through the fast path ?
            heads.sort()
            allheads = self.heads()
            allheads.sort()
            if heads == allheads:
                common = []
                # parents of bases are known from both sides
                for n in bases:
                    for p in self.changelog.parents(n):
                        if p != nullid:
                            common.append(p)
                return self._changegroup(common, source)

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst, source)
        # Some bases may turn out to be superfluous, and some heads may be
        # too. nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = set()
        # We assume that all parents of bases are known heads.
        for n in bases:
            knownheads.update(cl.parents(n))
        knownheads.discard(nullid)
        knownheads = list(knownheads)
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known. The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into a set.
            has_cl_set = set(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = set()

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents. This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = list(hasset)
            haslst.sort(key=revlog.rev)
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset.add(n)
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup. The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = set()
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
                if linknode in has_cl_set:
                    has_mnfst_set.add(n)
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
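            # next_rev is a one-element list rather than a plain int: the
            # nested function below mutates it, and Python 2 closures have no
            # 'nonlocal' to rebind an outer-scope name directly.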
1808 # This gathers information from each manifestnode included in the
1718 # This gathers information from each manifestnode included in the
1809 # changegroup about which filenodes the manifest node references
1719 # changegroup about which filenodes the manifest node references
1810 # so we can include those in the changegroup too.
1720 # so we can include those in the changegroup too.
1811 #
1721 #
1812 # It also remembers which changenode each filenode belongs to. It
1722 # It also remembers which changenode each filenode belongs to. It
1813 # does this by assuming the a filenode belongs to the changenode
1723 # does this by assuming the a filenode belongs to the changenode
1814 # the first manifest that references it belongs to.
1724 # the first manifest that references it belongs to.
1815 def collect_msng_filenodes(mnfstnode):
1725 def collect_msng_filenodes(mnfstnode):
1816 r = mnfst.rev(mnfstnode)
1726 r = mnfst.rev(mnfstnode)
1817 if r == next_rev[0]:
1727 if r == next_rev[0]:
1818 # If the last rev we looked at was the one just previous,
1728 # If the last rev we looked at was the one just previous,
1819 # we only need to see a diff.
1729 # we only need to see a diff.
1820 deltamf = mnfst.readdelta(mnfstnode)
1730 deltamf = mnfst.readdelta(mnfstnode)
1821 # For each line in the delta
1731 # For each line in the delta
1822 for f, fnode in deltamf.iteritems():
1732 for f, fnode in deltamf.iteritems():
1823 f = changedfiles.get(f, None)
1733 f = changedfiles.get(f, None)
1824 # And if the file is in the list of files we care
1734 # And if the file is in the list of files we care
1825 # about.
1735 # about.
1826 if f is not None:
1736 if f is not None:
1827 # Get the changenode this manifest belongs to
1737 # Get the changenode this manifest belongs to
1828 clnode = msng_mnfst_set[mnfstnode]
1738 clnode = msng_mnfst_set[mnfstnode]
1829 # Create the set of filenodes for the file if
1739 # Create the set of filenodes for the file if
1830 # there isn't one already.
1740 # there isn't one already.
1831 ndset = msng_filenode_set.setdefault(f, {})
1741 ndset = msng_filenode_set.setdefault(f, {})
1832 # And set the filenode's changelog node to the
1742 # And set the filenode's changelog node to the
1833 # manifest's if it hasn't been set already.
1743 # manifest's if it hasn't been set already.
1834 ndset.setdefault(fnode, clnode)
1744 ndset.setdefault(fnode, clnode)
1835 else:
1745 else:
1836 # Otherwise we need a full manifest.
1746 # Otherwise we need a full manifest.
1837 m = mnfst.read(mnfstnode)
1747 m = mnfst.read(mnfstnode)
1838 # For every file in we care about.
1748 # For every file in we care about.
1839 for f in changedfiles:
1749 for f in changedfiles:
1840 fnode = m.get(f, None)
1750 fnode = m.get(f, None)
1841 # If it's in the manifest
1751 # If it's in the manifest
1842 if fnode is not None:
1752 if fnode is not None:
1843 # See comments above.
1753 # See comments above.
1844 clnode = msng_mnfst_set[mnfstnode]
1754 clnode = msng_mnfst_set[mnfstnode]
1845 ndset = msng_filenode_set.setdefault(f, {})
1755 ndset = msng_filenode_set.setdefault(f, {})
1846 ndset.setdefault(fnode, clnode)
1756 ndset.setdefault(fnode, clnode)
1847 # Remember the revision we hope to see next.
1757 # Remember the revision we hope to see next.
1848 next_rev[0] = r + 1
1758 next_rev[0] = r + 1
1849 return collect_msng_filenodes
1759 return collect_msng_filenodes
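
One detail worth calling out: next_rev = [0] is a one-element list rather than a plain integer because the nested collect_msng_filenodes needs to update it, and a Python 2 closure can mutate an object it closes over but cannot rebind an enclosing name (nonlocal only arrived in Python 3). A minimal sketch of the pattern:

# Sketch of the mutable-cell trick used by filenode_collector above.
def make_counter():
    count = [0]              # one-element list acts as a mutable cell
    def bump():
        count[0] += 1        # mutate the cell, never rebind 'count'
        return count[0]
    return bump

bump = make_counter()
print(bump())  # 1
print(bump())  # 2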

        # We have a list of filenodes we think we need for a file; let's
        # remove all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = set()
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
                if clnode in has_cl_set:
                    hasset.add(n)
            prune_parents(filerevlog, hasset, msngset)

        # A function-generating function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode
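
Note the recurring idiom here: both ndset.setdefault(fnode, clnode) in the collector above and the explicit "if node not in nodes" test in add_extra_nodes implement the same first-writer-wins rule, so whichever changenode is seen first for a node becomes its owner. A tiny illustration with toy data:

# First-writer-wins ownership, as used for the filenode -> changenode maps.
owners = {}
sightings = [('f1', 'c1'), ('f2', 'c1'), ('f1', 'c2')]
for fnode, clnode in sightings:
    owners.setdefault(fnode, clnode)   # the later ('f1', 'c2') is ignored
print(owners['f1'])  # c1: the first owner sticks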

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(key=mnfst.rev)
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        continue
                    msng_filenode_set.setdefault(fname, {})
                    changedfiles[fname] = 1
            # Go through all our files in order sorted by name.
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if fname in msng_filenode_set:
                    prune_filenodes(fname, filerevlog)
                    add_extra_nodes(fname, msng_filenode_set[fname])
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision number.
                    msng_filenode_lst.sort(key=filerevlog.rev)
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if fname in msng_filenode_set:
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
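
Note that gengroup builds nothing up front: it is a generator, and util.chunkbuffer wraps it so callers can read() the changegroup as an ordinary file-like stream. The real chunkbuffer lives in mercurial/util.py and differs in detail; the sketch below is only a simplified stand-in showing the shape of the idea:

# Minimal file-like wrapper over an iterator of byte chunks (illustrative).
def gen_chunks():
    # Yield the payload piece by piece, like gengroup() above.
    yield b'header '
    yield b'manifests '
    yield b'files'

class ChunkBuffer(object):
    def __init__(self, gen):
        self._gen = gen
        self._buf = b''
    def read(self, n):
        # Pull chunks lazily until n bytes are buffered (or the source ends).
        while len(self._buf) < n:
            try:
                self._buf += next(self._gen)
            except StopIteration:
                break
        data, self._buf = self._buf[:n], self._buf[n:]
        return data

f = ChunkBuffer(gen_chunks())
print(f.read(10))  # b'header man': consumed incrementally, never fully buffered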

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, common, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        common is the set of common nodes between remote and self"""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.findmissing(common)
        revset = set([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            return x

        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                changedfileset.update(c[3])
            return collect_changed_files

        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(revlog.rev(n)))
            return lookuprevlink
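
lookuprevlink_func, like changed_file_collector here and lookup_filenode_link_func earlier, is one pattern repeated: a factory binds per-revlog (or per-file) state once and hands back a cheap closure for the hot loop. A generic sketch of that pattern, with toy data:

# Closure-factory pattern: bind state once, return a small lookup function.
def make_lookup(table):
    def lookup(key):          # the closure the inner loop calls repeatedly
        return table[key]
    return lookup

lookup_a = make_lookup({'n1': 'c1'})
lookup_b = make_lookup({'n1': 'c9'})  # independent state per factory call
print(lookup_a('n1'))  # c1
print(lookup_b('n1'))  # c9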

        def gengroup():
            # construct a list of all changed files
            changedfiles = set()

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())

    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = len(fl)
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1

            newheads = len(cl.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            del tr

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug(_("updating the branch cache\n"))
            self.branchtags()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
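
The signed head-count encoding documented in the docstring above can be decoded mechanically. A hypothetical helper, not part of Mercurial, just to make the encoding concrete:

# Decode addchangegroup()'s return value back into a head delta.
def describe_result(ret):
    if ret == 0:
        return "nothing changed (or no source)"
    if ret > 0:
        return "%d head(s) added" % (ret - 1)   # 1 means 'same heads'
    return "%d head(s) removed" % (-ret - 1)

for ret in (0, 1, 3, -2):
    print('%d -> %s' % (ret, describe_result(ret)))
# 0 -> nothing changed (or no source)
# 1 -> 0 head(s) added
# 3 -> 2 head(s) added
# -2 -> 1 head(s) removed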


    def stream_in(self, remote):
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
            # for backwards compat, name was partially encoded
            ofp = self.sopener(store.decodedir(name), 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.invalidate()
        return len(self.heads()) + 1
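
The protocol stream_in reads is line-oriented: a status code, then a "total_files total_bytes" line, then for each file a "name\0size" line followed by exactly size bytes of content. A self-contained sketch that parses the same framing from an in-memory buffer (illustrative data; the real stream is produced by Mercurial's streamclone support):

import io

# Two fake files: "a" (3 bytes) and "dir/b" (2 bytes).
wire = b'0\n2 5\na\x003\nxyzdir/b\x002\nok'
fp = io.BytesIO(wire)

assert int(fp.readline()) == 0                  # status: OK
total_files, total_bytes = map(int, fp.readline().split(b' ', 1))
print('%d files, %d bytes total' % (total_files, total_bytes))
for i in range(total_files):
    name, size = fp.readline().split(b'\x00', 1)
    size = int(size)
    data = fp.read(size)                        # exactly `size` payload bytes
    print('%s %d %r' % (name.decode('ascii'), size, data))
# a 3 b'xyz'
# dir/b 2 b'ok'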

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads and remote.capable('stream'):
            return self.stream_in(remote)
        return self.pull(remote, heads)

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a
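
The point of aftertrans is the comment above it: the returned callback copies the (src, dest) pairs up front and closes over nothing else, so handing it to a transaction creates no reference cycle back to the repository and __del__-based cleanup still runs promptly. A toy run of the same pattern, with os.rename standing in for util.rename:

import os, tempfile

def aftertrans(files):
    renamefiles = [tuple(t) for t in files]   # snapshot; no repo reference kept
    def a():
        for src, dest in renamefiles:
            os.rename(src, dest)              # stand-in for util.rename
    return a

d = tempfile.mkdtemp()
src = os.path.join(d, 'journal')
open(src, 'w').close()
cb = aftertrans([(src, os.path.join(d, 'undo'))])
cb()   # runs later, e.g. when the transaction commits
print(os.listdir(d))  # ['undo']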

def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True
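
Taken together, the two halves of this file meet in a push: the sender produces a chunk stream with changegroup()/changegroupsubset(), and the receiver consumes it with addchangegroup(). The sketch below wires two local repositories together using the era's API as shown above; it is an untested illustration, and the paths are placeholders:

# Hypothetical local "push" between two related repositories (sketch only).
from mercurial import ui as uimod, hg

src = hg.repository(uimod.ui(), '/path/to/source')
dst = hg.repository(uimod.ui(), '/path/to/dest')

# Nodes the destination already has serve as the changegroup bases.
cg = src.changegroup(dst.heads(), 'push')   # file-like chunk stream

lock = dst.lock()
try:
    ret = dst.addchangegroup(cg, 'push', url='file:///path/to/source')
finally:
    lock.release()
print('head delta code: %d' % ret)   # interpret per addchangegroup's docstring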