Show More
@@ -1,303 +1,303 b'' | |||||
1 | # shallowbundle.py - bundle10 implementation for use with shallow repositories |
|
1 | # shallowbundle.py - bundle10 implementation for use with shallow repositories | |
2 | # |
|
2 | # | |
3 | # Copyright 2013 Facebook, Inc. |
|
3 | # Copyright 2013 Facebook, Inc. | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of the |
|
5 | # This software may be used and distributed according to the terms of the | |
6 | # GNU General Public License version 2 or any later version. |
|
6 | # GNU General Public License version 2 or any later version. | |
7 | from __future__ import absolute_import |
|
7 | from __future__ import absolute_import | |
8 |
|
8 | |||
9 | from mercurial.i18n import _ |
|
9 | from mercurial.i18n import _ | |
10 | from mercurial.node import bin, hex, nullid |
|
10 | from mercurial.node import bin, hex, nullid | |
11 | from mercurial import ( |
|
11 | from mercurial import ( | |
12 | bundlerepo, |
|
12 | bundlerepo, | |
13 | changegroup, |
|
13 | changegroup, | |
14 | error, |
|
14 | error, | |
15 | match, |
|
15 | match, | |
16 | mdiff, |
|
16 | mdiff, | |
17 | pycompat, |
|
17 | pycompat, | |
18 | ) |
|
18 | ) | |
19 | from . import ( |
|
19 | from . import ( | |
20 | constants, |
|
20 | constants, | |
21 | remotefilelog, |
|
21 | remotefilelog, | |
22 | shallowutil, |
|
22 | shallowutil, | |
23 | ) |
|
23 | ) | |
24 |
|
24 | |||
25 | NoFiles = 0 |
|
25 | NoFiles = 0 | |
26 | LocalFiles = 1 |
|
26 | LocalFiles = 1 | |
27 | AllFiles = 2 |
|
27 | AllFiles = 2 | |
28 |
|
28 | |||
29 |
|
29 | |||
def shallowgroup(cls, self, nodelist, rlog, lookup, units=None, reorder=None):
    """Yield changegroup chunks for ``nodelist`` taken from ``rlog``.

    For plain (non-remotefilelog) storage this simply delegates to the
    superclass ``group()``.  For remotefilelog storage the nodes are first
    sorted topologically, then each node is emitted as a delta against its
    predecessor in that order via ``self.nodechunk``.
    """
    # Not shallow storage: let the stock changegroup packer handle it.
    if not isinstance(rlog, remotefilelog.remotefilelog):
        for chunk in super(cls, self).group(nodelist, rlog, lookup, units=units):
            yield chunk
        return

    # An empty group still needs its closing chunk.
    if not nodelist:
        yield self.close()
        return

    nodes = shallowutil.sortnodes(nodelist, rlog.parents)

    # Prepend the first node's parent so the first delta has a base.
    nodes.insert(0, rlog.parents(nodes[0])[0])

    # Walk consecutive pairs, emitting each node as a delta against the
    # previous one in topological order.
    for base, node in zip(nodes, nodes[1:]):
        linknode = lookup(node)
        for chunk in self.nodechunk(rlog, node, base, linknode):
            yield chunk

    yield self.close()
54 |
|
54 | |||
55 |
|
55 | |||
class shallowcg1packer(changegroup.cgpacker):
    """Changegroup packer that understands shallow (remotefilelog) repos.

    Overrides the stock ``cgpacker`` so that, when remotefilelog is enabled,
    file contents may be partially or entirely omitted from the generated
    changegroup depending on the transfer source and peer capabilities.
    """

    def generate(self, commonrevs, clnodes, fastpathlinkrev, source, **kwargs):
        # The fastpath relies on filelog linkrevs, which shallow repos
        # don't have reliable data for, so force it off.
        if shallowutil.isenabled(self._repo):
            fastpathlinkrev = False

        return super(shallowcg1packer, self).generate(
            commonrevs, clnodes, fastpathlinkrev, source, **kwargs
        )

    def group(self, nodelist, rlog, lookup, units=None, reorder=None):
        # Route through the shared shallow-aware grouping helper.
        return shallowgroup(
            shallowcg1packer, self, nodelist, rlog, lookup, units=units
        )

    def generatefiles(self, changedfiles, *args):
        # Two Mercurial versions are supported here: older ones pass a
        # 3-tuple, newer ones a 6-tuple.  Only the names used below matter.
        try:
            linknodes, commonrevs, source = args
        except ValueError:
            commonrevs, source, mfdicts, fastpathlinkrev, fnodes, clrevs = args
        if shallowutil.isenabled(self._repo):
            repo = self._repo
            if isinstance(repo, bundlerepo.bundlerepository):
                # If the bundle contains filelogs, we can't pull from it, since
                # bundlerepo is heavily tied to revlogs. Instead require that
                # the user use unbundle instead.
                # Force load the filelog data.
                bundlerepo.bundlerepository.file(repo, b'foo')
                if repo._cgfilespos:
                    raise error.Abort(
                        b"cannot pull from full bundles",
                        hint=b"use `hg unbundle` instead",
                    )
                return []
            filestosend = self.shouldaddfilegroups(source)
            if filestosend == NoFiles:
                # Send only files outside the shallow pattern; matched
                # files will be fetched on demand instead.
                changedfiles = list(
                    [f for f in changedfiles if not repo.shallowmatch(f)]
                )

        return super(shallowcg1packer, self).generatefiles(changedfiles, *args)

    def shouldaddfilegroups(self, source):
        """Decide how many file groups to include: AllFiles, LocalFiles,
        or NoFiles, based on the transfer source and peer capabilities."""
        repo = self._repo
        if not shallowutil.isenabled(repo):
            return AllFiles

        if source == b"push" or source == b"bundle":
            return AllFiles

        caps = self._bundlecaps or []
        if source == b"serve" or source == b"pull":
            # A remotefilelog-capable peer can refetch; send only local data.
            if constants.BUNDLE2_CAPABLITY in caps:
                return LocalFiles
            else:
                # Serving to a full repo requires us to serve everything
                repo.ui.warn(_(b"pulling from a shallow repo\n"))
                return AllFiles

        return NoFiles

    def prune(self, rlog, missing, commonrevs):
        # Plain revlogs use the stock pruning logic.
        if not isinstance(rlog, remotefilelog.remotefilelog):
            return super(shallowcg1packer, self).prune(
                rlog, missing, commonrevs
            )

        # For remotefilelog, keep only nodes whose linkrev is not already
        # common with the receiver.
        repo = self._repo
        results = []
        for fnode in missing:
            fctx = repo.filectx(rlog.filename, fileid=fnode)
            if fctx.linkrev() not in commonrevs:
                results.append(fnode)
        return results

    def nodechunk(self, revlog, node, prevnode, linknode):
        """Yield the wire chunks (header, delta header, delta) for one node,
        expressed as a delta against ``prevnode``."""
        prefix = b''
        if prevnode == nullid:
            # No base: send full text with a trivial "replace all" header.
            delta = revlog.rawdata(node)
            prefix = mdiff.trivialdiffheader(len(delta))
        else:
            # Actually uses remotefilelog.revdiff which works on nodes, not revs
            delta = revlog.revdiff(prevnode, node)
        p1, p2 = revlog.parents(node)
        flags = revlog.flags(node)
        meta = self.builddeltaheader(node, p1, p2, prevnode, linknode, flags)
        meta += prefix
        l = len(meta) + len(delta)
        yield changegroup.chunkheader(l)
        yield meta
        yield delta
146 |
|
146 | |||
147 |
|
147 | |||
def makechangegroup(orig, repo, outgoing, version, source, *args, **kwargs):
    """Wrapper around changegroup creation for shallow repositories.

    When serving, temporarily narrows ``repo.shallowmatch`` to the
    include/exclude patterns advertised in the client's bundle
    capabilities, restoring the original matcher afterwards.
    """
    if not shallowutil.isenabled(repo):
        return orig(repo, outgoing, version, source, *args, **kwargs)

    savedmatch = repo.shallowmatch
    try:
        # if serving, only send files the clients has patterns for
        if source == b'serve':
            includepattern = None
            excludepattern = None
            # Patterns arrive NUL-separated inside capability strings.
            for cap in kwargs.get('bundlecaps') or []:
                if cap.startswith(b"includepattern="):
                    patterns = cap[len(b"includepattern=") :]
                    if patterns:
                        includepattern = patterns.split(b'\0')
                elif cap.startswith(b"excludepattern="):
                    patterns = cap[len(b"excludepattern=") :]
                    if patterns:
                        excludepattern = patterns.split(b'\0')
            if includepattern or excludepattern:
                repo.shallowmatch = match.match(
                    repo.root, b'', None, includepattern, excludepattern
                )
            else:
                repo.shallowmatch = match.always()
        return orig(repo, outgoing, version, source, *args, **kwargs)
    finally:
        # Always restore the repo-wide matcher, even on error.
        repo.shallowmatch = savedmatch
177 |
|
177 | |||
178 |
|
178 | |||
def addchangegroupfiles(orig, repo, source, revmap, trp, expectedfiles, *args):
    """Apply incoming changegroup file revisions to a shallow repository.

    Reads every file chunk from ``source`` first, then lays the revisions
    down in topological order across all files, prefetching any delta/copy
    bases that are not part of the changegroup.  Returns
    ``(revision count, new file count)``.
    """
    if not shallowutil.isenabled(repo):
        return orig(repo, source, revmap, trp, expectedfiles, *args)

    newfiles = 0
    visited = set()
    # (filename, node) -> (node, p1, p2, cs, deltabase, delta, flags)
    revisiondatas = {}
    # (filename, node) pairs still awaiting application
    queue = []

    # Normal Mercurial processes each file one at a time, adding all
    # the new revisions for that file at once. In remotefilelog a file
    # revision may depend on a different file's revision (in the case
    # of a rename/copy), so we must lay all revisions down across all
    # files in topological order.

    # read all the file chunks but don't add them
    progress = repo.ui.makeprogress(_(b'files'), total=expectedfiles)
    while True:
        chunkdata = source.filelogheader()
        if not chunkdata:
            break
        f = chunkdata[b"filename"]
        repo.ui.debug(b"adding %s revisions\n" % f)
        progress.increment()

        if not repo.shallowmatch(f):
            # File is outside the shallow pattern: store it in a normal
            # revlog immediately rather than deferring.
            fl = repo.file(f)
            deltas = source.deltaiter()
            fl.addgroup(deltas, revmap, trp)
            continue

        chain = None
        while True:
            # returns: (node, p1, p2, cs, deltabase, delta, flags) or None
            revisiondata = source.deltachunk(chain)
            if not revisiondata:
                break

            chain = revisiondata[0]

            revisiondatas[(f, chain)] = revisiondata
            queue.append((f, chain))

            if f not in visited:
                newfiles += 1
                visited.add(f)

        if chain is None:
            raise error.Abort(_(b"received file revlog group is empty"))

    # (filename, node) pairs already written to storage
    processed = set()

    def available(f, node, depf, depnode):
        # True when dependency (depf, depnode) is already written or is
        # assumed present in the repo; otherwise requeues (f, node) behind
        # its dependency and returns False.
        if depnode != nullid and (depf, depnode) not in processed:
            if not (depf, depnode) in revisiondatas:
                # It's not in the changegroup, assume it's already
                # in the repo
                return True
            # re-add self to queue
            queue.insert(0, (f, node))
            # add dependency in front
            queue.insert(0, (depf, depnode))
            return False
        return True

    # Counts consecutive deferrals; used to detect dependency cycles.
    skipcount = 0

    # Prefetch the non-bundled revisions that we will need
    prefetchfiles = []
    for f, node in queue:
        revisiondata = revisiondatas[(f, node)]
        # revisiondata: (node, p1, p2, cs, deltabase, delta, flags)
        dependents = [revisiondata[1], revisiondata[2], revisiondata[4]]

        for dependent in dependents:
            if dependent == nullid or (f, dependent) in revisiondatas:
                continue
            prefetchfiles.append((f, hex(dependent)))

    repo.fileservice.prefetch(prefetchfiles)

    # Apply the revisions in topological order such that a revision
    # is only written once it's deltabase and parents have been written.
    while queue:
        f, node = queue.pop(0)
        if (f, node) in processed:
            continue

        skipcount += 1
        if skipcount > len(queue) + 1:
            raise error.Abort(_(b"circular node dependency"))

        fl = repo.file(f)

        revisiondata = revisiondatas[(f, node)]
        # revisiondata: (node, p1, p2, cs, deltabase, delta, flags)
        node, p1, p2, linknode, deltabase, delta, flags = revisiondata

        if not available(f, node, f, deltabase):
            continue

        base = fl.rawdata(deltabase)
        text = mdiff.patch(base, delta)
        if not isinstance(text, bytes):
            text = bytes(text)

        meta, text = shallowutil.parsemeta(text)
        if b'copy' in meta:
            copyfrom = meta[b'copy']
            copynode = bin(meta[b'copyrev'])
            if not available(f, node, copyfrom, copynode):
                continue

        for p in [p1, p2]:
            if p != nullid:
                # NOTE(review): this `continue` only exits the inner
                # `for p` loop, not the outer `while`, so `fl.add` below
                # still runs even when a parent is unavailable (though
                # `available` has also requeued the pair).  Looks like the
                # intent was to defer the whole revision — confirm upstream.
                if not available(f, node, f, p):
                    continue

        fl.add(text, meta, trp, linknode, p1, p2)
        processed.add((f, node))
        # Progress was made; reset the cycle-detection counter.
        skipcount = 0

    progress.complete()

    return len(revisiondatas), newfiles
General Comments 0
You need to be logged in to leave comments.
Login now