Removed limit from optimistic effect
lucasbordeau committed Aug 12, 2024
1 parent 8f0defa commit 8cb2f3b
Showing 1 changed file with 1 addition and 52 deletions.
@@ -39,7 +39,7 @@ export const triggerUpdateRecordOptimisticEffect = ({
     fields: {
       [objectMetadataItem.namePlural]: (
         rootQueryCachedResponse,
-        { DELETE, readField, storeFieldName, toReference },
+        { readField, storeFieldName, toReference },
       ) => {
         const shouldSkip = !isObjectRecordConnectionWithRefs(
           objectMetadataItem.nameSingular,
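
Context for this hunk: DELETE is one of the modifier utilities Apollo injects into cache.modify field functions; returning it removes the field from the cached object so the next read has to hit the network. A minimal sketch of that mechanism, using a hypothetical comments field for illustration:

    import { InMemoryCache } from '@apollo/client';

    const cache = new InMemoryCache();

    cache.modify({
      fields: {
        // Returning the DELETE sentinel removes this field from the
        // cache, so the next query that reads it triggers a refetch.
        comments(_existingComments, { DELETE }) {
          return DELETE;
        },
      },
    });

Dropping DELETE from the destructured utilities here reflects that the optimistic effect no longer invalidates root queries and stays a pure cache update.
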
@@ -64,7 +64,6 @@ export const triggerUpdateRecordOptimisticEffect = ({
 
         const rootQueryFilter = rootQueryVariables?.filter;
         const rootQueryOrderBy = rootQueryVariables?.orderBy;
-        const rootQueryLimit = rootQueryVariables?.first;
 
         const shouldTryToMatchFilter = isDefined(rootQueryFilter);
 
@@ -123,56 +122,6 @@ export const triggerUpdateRecordOptimisticEffect = ({
           });
         }
 
-        const shouldLimitNextRootQueryEdges = isDefined(rootQueryLimit);
-
-        // TODO: not sure that we should trigger a DELETE here, as it will trigger a network request.
-        // Is it the responsibility of this optimistic effect function to delete a root query that will trigger a network request?
-        // Shouldn't we let the response from the network overwrite the cache and keep this util purely about cache updates?
-        //
-        // Should we even apply the limit at all, since with pagination we cannot really do optimistic rendering and should
-        // wait for the network response to update the cache?
-        //
-        // Maybe we could apply a merging function instead and exclude limit from the caching field arguments?
-        // There is also a problem that is not present yet but may arise if we start
-        // to use limit arguments: for now we rely on the hard-coded limit of 60 in pg_graphql.
-        // This is as if we had a { limit: 60 } argument in every query, but we don't,
-        // so Apollo correctly merges the return of fetchMore for now. With explicit
-        // limit arguments it wouldn't merge well, as Thomas found with mail threads:
-        // he applied a limit of 2 and Apollo created one root query in the cache for each.
-        // In Thomas' case we should implement this, because he uses a hack to overwrite the first request with the return of the other.
-        // See: https://www.apollographql.com/docs/react/pagination/cursor-based/#relay-style-cursor-pagination
-        // See: https://www.apollographql.com/docs/react/pagination/core-api/#merging-queries
-        if (shouldLimitNextRootQueryEdges) {
-          // If the previous edges length was exactly at the required limit,
-          // but after the update the next edges length is under the limit,
-          // we cannot know for sure whether re-fetching the query
-          // would return more edges, so we cannot optimistically deduce
-          // the query's result.
-          // In this case, invalidate the cache entry so it can be re-fetched.
-          const rootQueryCurrentCachedRecordEdgesLengthIsAtLimit =
-            rootQueryCurrentEdges.length === rootQueryLimit;
-
-          // If the next edges length is under the limit, we can wait for the network response and merge the result;
-          // the merge function could then implement this mechanism to limit the number of edges in the cache.
-          const rootQueryNextCachedRecordEdgesLengthIsUnderLimit =
-            rootQueryNextEdges.length < rootQueryLimit;
-
-          const shouldDeleteRootQuerySoItCanBeRefetched =
-            rootQueryCurrentCachedRecordEdgesLengthIsAtLimit &&
-            rootQueryNextCachedRecordEdgesLengthIsUnderLimit;
-
-          if (shouldDeleteRootQuerySoItCanBeRefetched) {
-            return DELETE;
-          }
-
-          const rootQueryNextCachedRecordEdgesLengthIsAboveRootQueryLimit =
-            rootQueryNextEdges.length > rootQueryLimit;
-
-          if (rootQueryNextCachedRecordEdgesLengthIsAboveRootQueryLimit) {
-            rootQueryNextEdges.splice(rootQueryLimit);
-          }
-        }
-
         return {
           ...rootQueryConnection,
           edges: rootQueryNextEdges,
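
The alternative floated in the removed comment — a merge function with the limit excluded from the caching field arguments — is what Apollo's relay-style pagination helper implements. A minimal sketch under that assumption, with people as a hypothetical stand-in for the dynamic [objectMetadataItem.namePlural] connection field:

    import { InMemoryCache } from '@apollo/client';
    import { relayStylePagination } from '@apollo/client/utilities';

    const cache = new InMemoryCache({
      typePolicies: {
        Query: {
          fields: {
            // `people` is a hypothetical stand-in for the dynamic
            // `[objectMetadataItem.namePlural]` connection field.
            // keyArgs lists the arguments that distinguish separate cache
            // entries; pagination arguments (first/after/last/before) are
            // excluded, so queries that differ only by limit or cursor
            // share one cached connection and fetchMore results are merged
            // into it rather than each limit creating its own root query.
            people: relayStylePagination(['filter', 'orderBy']),
          },
        },
      },
    });

With a policy like this in place, the cache would not need the DELETE-based invalidation removed above, which matches the two Apollo docs pages cited in the comment.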
